| column | type | range |
|---|---|---|
| code | string | lengths 2.5k to 6.36M |
| kind | string | 2 classes |
| parsed_code | string | lengths 0 to 404k |
| quality_prob | float64 | 0 to 0.98 |
| learning_prob | float64 | 0.03 to 1 |
# Nonparametric tests ## Therapy for anorexia A study evaluates the effectiveness of behavioral therapy for treating anorexia. For 50 patients the weight is known both before the start of therapy and at its end. Was the therapy effective? ``` import numpy as np import pandas as pd import itertools from scipy import stats from statsmodels.stats.descriptivestats import sign_test from statsmodels.stats.weightstats import zconfint %pylab inline ``` ### Loading the data ``` weight_data = pd.read_csv('data/16_weight.txt', sep = '\t', header = 0) weight_data.head() pylab.figure(figsize=(12,4)) pylab.subplot(1,2,1) pylab.grid() pylab.hist(weight_data.Before, color = 'r') pylab.xlabel('Before') pylab.subplot(1,2,2) pylab.grid() pylab.hist(weight_data.After, color = 'b') pylab.xlabel('After') pylab.show() weight_data.describe() ``` ## Two-sample tests for paired samples $H_0\colon$ the median weights before and after therapy coincide $H_1\colon$ the median weights before and after therapy differ ``` print('95%% confidence interval for mean weight before therapy: [%f, %f]' % zconfint(weight_data.Before)) print('95%% confidence interval for mean weight after therapy: [%f, %f]' % zconfint(weight_data.After)) pylab.hist(weight_data.After - weight_data.Before) pylab.show() ``` ### Sign test $H_0\colon P\left(X_1>X_2\right)=\frac1{2},$ $H_1\colon P\left(X_1>X_2\right)\neq\frac1{2}$ ``` print("M: %d, p-value: %f" % sign_test(weight_data.After - weight_data.Before)) ``` ### Wilcoxon signed-rank test $H_0\colon med\left(X_1-X_2\right)=0,$ $H_1\colon med\left(X_1-X_2\right)\neq0$ ``` stats.wilcoxon(weight_data.After, weight_data.Before) stats.wilcoxon(weight_data.After - weight_data.Before) ``` ### Permutation test $H_0\colon \mathbb{E}(X_1 - X_2) = 0$ $H_1\colon \mathbb{E}(X_1 - X_2) \neq 0$ ``` def permutation_t_stat_1sample(sample, mean): t_stat = sum(map(lambda x: x - mean, sample)) return t_stat def permutation_zero_distr_1sample(sample, mean, max_permutations = None): centered_sample = list(map(lambda x: x - mean, sample)) if max_permutations: signs_array = set([tuple(x) for x in 2 * np.random.randint(2, size = (max_permutations, len(sample))) - 1 ]) else: signs_array = itertools.product([-1, 1], repeat = len(sample)) distr = [sum(centered_sample * np.array(signs)) for signs in signs_array] return distr pylab.hist(permutation_zero_distr_1sample(weight_data.After - weight_data.Before, 0., max_permutations = 10000)) pylab.show() def permutation_test(sample, mean, max_permutations = None, alternative = 'two-sided'): if alternative not in ('two-sided', 'less', 'greater'): raise ValueError("alternative not recognized\n" "should be 'two-sided', 'less' or 'greater'") t_stat = permutation_t_stat_1sample(sample, mean) zero_distr = permutation_zero_distr_1sample(sample, mean, max_permutations) if alternative == 'two-sided': return sum([1. if abs(x) >= abs(t_stat) else 0. for x in zero_distr]) / len(zero_distr) if alternative == 'less': return sum([1. if x <= t_stat else 0. for x in zero_distr]) / len(zero_distr) if alternative == 'greater': return sum([1. if x >= t_stat else 0. for x in zero_distr]) / len(zero_distr) print("p-value: %f" % permutation_test(weight_data.After - weight_data.Before, 0., max_permutations = 1000)) print("p-value: %f" % permutation_test(weight_data.After - weight_data.Before, 0., max_permutations = 50000)) ```
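As a sanity check on the sign-flipping logic above, here is a minimal, self-contained sketch that enumerates the exact null distribution for a tiny made-up difference sample; the `diffs` values are purely illustrative and are not taken from the anorexia data.

```
import itertools
import numpy as np

# Tiny illustrative difference sample (not the anorexia data).
diffs = np.array([1.5, -0.5, 2.0, 0.5, -1.0])
t_obs = diffs.sum()  # observed statistic: sum of deviations from 0

# Exact null distribution: every possible +/-1 sign assignment.
null = [np.sum(diffs * np.array(signs))
        for signs in itertools.product([-1, 1], repeat=len(diffs))]

# Two-sided p-value: share of sign assignments at least as extreme as observed.
p_two_sided = np.mean(np.abs(null) >= np.abs(t_obs))
print(p_two_sided)
```

With only five differences there are $2^5 = 32$ equally likely sign assignments, so the p-value here is always a multiple of 1/32.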
github_jupyter
0.468791
0.942612
# External Metadata As we learned about in our reading of [The Numbers Don't Speak for Themselves](https://data-feminism.mitpress.mit.edu/pub/czq9dfs5/release/2) in [Data Feminism](https://data-feminism.mitpress.mit.edu/) this week, the *description* of data is extremely important. Description is often recorded separately from the data as *external metadata*. External metadata is especially important for representing the context in which data was generated. We also learned previously in Chapter 6 of [The Theory and Craft of Digital Preservation](https://jhupbooks.press.jhu.edu/title/theory-and-craft-digital-preservation) that as a practical matter, this context often includes a record of what a file looks like at a particular point in time. This is known as a file's *fixity*. Knowing what files should be present, and that their content is what we expect it to be, is a fundamental requirement for caring for data. It lets us notice when things have gone wrong with our data, and when things seem to be ok. ## Fixity One popular way of managing fixity information for files is to create what's called a digital fingerprint or *hash* for a file. As Owens says: > A cryptographic hash function is an algorithm that takes a given set of data (like a file) and computes a sequence of characters that then serves as a fingerprint for that data. Even changing a single bit in a file will result in a totally different sequence of characters. For example, I computed an MD5 hash for an image file which returned the value "4937A316849E472473608D43446EBF9EF". Now if I compute the hash for another copy of that file and get the same result, I'll have rather high confidence that those two copies are exactly the same. Similarly, if I have a stored value from the last time I computed a hash for that file, when I recompute the hash in the future and get that value again, I have a high degree of confidence that the file has not been changed or altered. If you are interested in learning more about fixity, check out the chapter on [Fixity and Checksums](https://www.dpconline.org/handbook/technical-solutions-and-tools/fixity-and-checksums) in the Digital Preservation Coalition's [Digital Preservation Handbook](https://www.dpconline.org/handbook/). ## Manifests It's not uncommon to store a list of files and their fixity values in a special file called a *manifest*. A manifest is an example of *external metadata*. The idea of a manifest is not unique to digital curation, and comes from an [older practice](https://en.wikipedia.org/wiki/Manifest_(transportation)) in transportation. When shipping things long distances by boat it was (and still is) important for the shipping companies and border control officials to have a description of everything that was put on the boat at the port of departure. Below is an example of a shipping manifest from the [Armenian Immigration Project](http://markarslan.org/ArmenianImmigrants/shiplists.html), for *people* who were immigrating through Ellis Island. <img src="https://raw.githubusercontent.com/edsu/inst341/master/modules/module-05/images/manifest.jpg"> The same concept that is used to track things as they move through space can be applied to things as they travel in time. A manifest simply lists the things we expect to be present, and what their state should be. In light of the D'Ignazio and Klein chapter, it's important to consider who the manifest is being made by, what it contains (and doesn't contain), and who it is being made for. 
As an analog to that question, think about the shipping manifest above, and how Armenian names are westernized as they are recorded in the receipt manifest at Ellis Island. Could there be a parallel for manifests in digital preservation? What does it mean to not allow files to change in a digital preservation system? ## Generating Fixity In this notebook we will experiment with generating fixity values, and storing them in a machine-readable manifest. We will also check the manifest to make sure the files look ok. First let's install some data to work with. We're going to use the inst341data package instead of Google Drive this week so that you can get data customized for you during the exercise. But first we're going to download the generic data for the class to use to illustrate some examples. ``` ! pip install --quiet inst341data import inst341data inst341data.get_module_5('inst341') ``` We can create a Path object for the data in the `inst341` directory that was just created on the file system. Then we can use it to print out the files in the directory. ``` from pathlib import Path data = Path('inst341') for p in data.iterdir(): print(p) ``` We will be looking at these more in a moment, but for now notice that there are a bunch of numbered files with different extensions as well as a `manifest.json` file. In order to calculate the `fixity` value for one of these files we're going to create a little function that uses Python's [hashlib](https://docs.python.org/3/library/hashlib.html?highlight=hashlib#module-hashlib) module to make it easy to generate a [SHA256](https://en.wikipedia.org/wiki/SHA-2) checksum for a `Path` object. SHA256 is a hashing algorithm similar to the MD5 algorithm discussed above. ``` import hashlib def get_sha256(p): h = hashlib.sha256() data = p.open('rb').read() h.update(data) return h.hexdigest() ``` Let's try using our `get_sha256` function by passing it a `Path` object for one of our files: ``` get_sha256(Path('inst341/492605.html')) ``` So the value `aa0f647bb649edf138b984356098bd412fcaa724c61fa802c6e741fd33886fee` is a unique fingerprint that identifies the contents of the file stored at `inst341/492605.html`. ## Reading a Manifest The `inst341/manifest.json` file is a manifest for all the files in the `inst341` directory and their fixities, stored in the [JavaScript Object Notation (JSON)](https://en.wikipedia.org/wiki/JSON) format. You have probably used JSON in your INST126 or INST326 classes since it's one of the most common data formats on the Web. This particular JSON file contains a `list` of `objects`, or as they are called in Python, `dictionaries`. Each of these dictionaries contains two key/value pairs: `path` and `sha256`. There are many different formats for manifests that are used in the digital preservation community. However, no matter the representation, the concept is essentially the same: you need a file name and a fixity value. Reading in our JSON manifest is relatively easy with Python's [json](https://docs.python.org/3/library/json.html) module. We just need to open the file and pass the file object to `json.load` which will parse all the JSON data into Python native data structures (a list of dictionaries) that we can then use like any list or dictionary. ``` import json manifest = json.load(open('inst341/manifest.json')) ``` Once we have read in the manifest we have a `manifest` variable that is a Python [list](https://docs.python.org/3/tutorial/datastructures.html#more-on-lists). 
Each element in the list is a [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) which has two keys: `path` and `sha256`. To demonstrate, we can loop through each item in the list and print out the `path` and `sha256` values. ``` for entry in manifest: print(entry['path'], entry['sha256']) ``` Remember, these are the files and sha256 values *in the manifest*. Hopefully they match the files we see on the file system. But you won't know until the manifest is *validated*. ## Validate the Manifest Now let's put all the pieces together to read in our manifest (inst341/manifest.json) and verify that each path's sha256 value matches what is found on the file system. We do this by calculating the sha256 by giving the `get_sha256` function a Path for a file, and comparing the result with what the manifest says it should be. ``` import json import pathlib manifest = json.load(open('inst341/manifest.json')) for entry in manifest: p = pathlib.Path(entry['path']) sha256 = get_sha256(p) if sha256 == entry['sha256']: print(p, 'is ok') else: print(p, 'is invalid: found', sha256, 'but expected', entry['sha256']) ``` Whew, the manifest looks valid! All the files in the manifest have a sha256 value that matches what we find when we recalculate it using the file on the filesystem. That means our data is what we expect it to be! ## Exercise ### 1. Get Data First download your module 5 data by replacing USERNAME in the string below with your UMD username (the same one you used in the Module 3 and 2 notebooks). ``` import inst341data inst341data.get_module_5('USERNAME') ``` If that generated an error, make sure you run the cell above that does the: pip install --quiet inst341data If it worked you will now have a directory named after your USERNAME in your Jupyter notebook environment. ### 2. Calculate Fixity Use the `get_sha256` function we created above to calculate and print out the sha256 value for one of your files. You may want to print out the files in your directory first to see what filenames you have. Or if you want you can use the File Explorer built into Colab by clicking on the <img style="display: inline; height: 20px; vertical-align: bottom; margin-left: 10px; margin-right: 10px;" src="https://raw.githubusercontent.com/edsu/inst341/master/modules/module-05/images/file-explorer.png"> icon in the menu on the left side of your screen. But remember, the `get_sha256` function that we wrote takes a Path object! ### 3. Validate Your Manifest Use the example above to validate your manifest. Remember you want to validate *your* files, not the ones in the `inst341` directory. Are there any files that failed validation? ### 4. **Optional:** Create a Validation Function If you'd like a challenge, see if you can create a function called `validate` that is given the path to a manifest and will return True or False depending on whether the manifest is valid or not. ### 5. **Really Optional:** Efficiency Do you see any problem with the `get_sha256` function above? How could it be improved? One possible approach is sketched below.
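The sketch below is one possible answer to the last two exercise questions, not the official solution; `get_sha256_chunked` and `validate` are names made up here for illustration. Reading the file in fixed-size chunks avoids loading a very large file into memory all at once, which is the main weakness of `get_sha256` above.

```
import hashlib
import json
from pathlib import Path

def get_sha256_chunked(p, chunk_size=65536):
    """Hash a file in fixed-size chunks so large files never sit fully in memory."""
    h = hashlib.sha256()
    with p.open('rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

def validate(manifest_path):
    """Return True only if every entry in the manifest matches the file on disk."""
    with open(manifest_path) as f:
        manifest = json.load(f)
    return all(get_sha256_chunked(Path(entry['path'])) == entry['sha256']
               for entry in manifest)
```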
github_jupyter
0.217088
0.953794
``` from IPython.display import HTML # Cell visibility - COMPLETE: tag = HTML('''<style> div.input { display:none; } </style>''') display(tag) # #Cell visibility - TOGGLE: # tag = HTML('''<script> # code_show=true; # function code_toggle() { # if (code_show){ # $('div.input').hide() # } else { # $('div.input').show() # } # code_show = !code_show # } # $( document ).ready(code_toggle); # </script> # <p style="text-align:right"> # Toggle cell visibility <a href="javascript:code_toggle()">here</a>.</p>''') # display(tag) ``` ## Complex numbers in Cartesian form Feel free to use this interactive example to visualize complex numbers in a complex plane, utilizing the Cartesian form. Also, you can test basic mathematical operators while working with complex numbers: addition, subtraction, multiplying, and dividing. All results are presented in the respective plot, as well as in the typical mathematical notation. You can manipulate complex numbers directly on the plot (by simple clicking), or/and use input fields at the same time. In order to provide better visibility of the respective vectors in the plot widget, the complex number coefficients are limited to $\pm10$. ``` %matplotlib notebook import matplotlib.pyplot as plt import matplotlib.patches as mpatches import numpy as np import ipywidgets as widgets from IPython.display import display red_patch = mpatches.Patch(color='red', label='z1') blue_patch = mpatches.Patch(color='blue', label='z2') green_patch = mpatches.Patch(color='green', label='z1 + z2') yellow_patch = mpatches.Patch(color='yellow', label='z1 - z2') black_patch = mpatches.Patch(color='black', label='z1 * z2') magenta_patch = mpatches.Patch(color='magenta', label='z1 / z2') # Init values XLIM = 5 YLIM = 5 vectors_index_first = False; V = [None, None] V_complex = [None, None] # Complex plane fig = plt.figure(num='Complex numbers in Cartesian form') ax = fig.add_subplot(1, 1, 1) def get_interval(lim): if lim <= 10: return 1 if lim < 75: return 5 if lim > 100: return 25 return 10 def set_ticks(): XLIMc = int((XLIM / 10) + 1) * 10 YLIMc = int((YLIM / 10) + 1) * 10 if XLIMc > 150: XLIMc += 10 if YLIMc > 150: YLIMc += 10 xstep = get_interval(XLIMc) ystep = get_interval(YLIMc) #print(stepx, stepy) major_ticks = np.arange(-XLIMc, XLIMc, xstep) major_ticks_y = np.arange(-YLIMc, YLIMc, ystep) ax.set_xticks(major_ticks) ax.set_yticks(major_ticks_y) ax.grid(which='both') def clear_plot(): plt.cla() set_ticks() ax.set_xlabel('Re') ax.set_ylabel('Im') plt.ylim([-YLIM, YLIM]) plt.xlim([-XLIM, XLIM]) plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch]) clear_plot() set_ticks() plt.show() set_ticks() # Set a complex number using direct manipulation on the plot def set_vector(i, data_x, data_y): clear_plot() V.pop(i) V.insert(i, (0, 0, round(data_x, 2), round(data_y, 2))) V_complex.pop(i) V_complex.insert(i, complex(round(data_x, 2), round(data_y, 2))) if i == 0: ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True) a1.value = round(data_x, 2) b1.value = round(data_y, 2) if V[1] != None: ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True) elif i == 1: ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True) a2.value = round(data_x, 2) b2.value = round(data_y, 2) if V[0] != None: ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True) max_bound() def onclick(event): global vectors_index_first vectors_index_first = not 
vectors_index_first x = event.xdata y = event.ydata if (x > 10): x = 10.0 if (x < - 10): x = -10.0 if (y > 10): y = 10.0 if (y < - 10): y = -10.0 if vectors_index_first: set_vector(0, x, y) else: set_vector(1, x, y) fig.canvas.mpl_connect('button_press_event', onclick) # Widgets a1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = -10, max = 10, step = 0.5) b1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = -10, max = 10, step = 0.5) button_set_z1 = widgets.Button(description="Plot z1") a2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = -10, max = 10, step = 0.5) b2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = -10, max = 10, step = 0.5) button_set_z2 = widgets.Button(description="Plot z2") box_layout_z1 = widgets.Layout(border='solid red', padding='10px') box_layout_z2 = widgets.Layout(border='solid blue', padding='10px') box_layout_opers = widgets.Layout(border='solid black', padding='10px') items_z1 = [widgets.Label("z1 = "), a1, widgets.Label("+ j * "), b1, button_set_z1] items_z2 = [widgets.Label("z2 = "), a2, widgets.Label("+ j * "), b2, button_set_z2] display(widgets.Box(children=items_z1, layout=box_layout_z1)) display(widgets.Box(children=items_z2, layout=box_layout_z2)) button_add = widgets.Button(description="Add") button_substract = widgets.Button(description="Subtract") button_multiply = widgets.Button(description="Multiply") button_divide = widgets.Button(description="Divide") button_reset = widgets.Button(description="Reset") output = widgets.Output() print('Complex number operations:') items_operations = [button_add, button_substract, button_multiply, button_divide, button_reset] display(widgets.Box(children=items_operations)) display(output) # Set complex number using input widgets (Text and Button) def on_button_set_z1_clicked(b): z1_old = V[0]; z1_new = (0, 0, a1.value, b1.value) if z1_old != z1_new: set_vector(0, a1.value, b1.value) change_lims() def on_button_set_z2_clicked(b): z2_old = V[1]; z2_new = (0, 0, a2.value, b2.value) if z2_old != z2_new: set_vector(1, a2.value, b2.value) change_lims() # Complex number operations: def perform_operation(oper): global XLIM, YLIM if (V_complex[0] != None) and (V_complex[1] != None): if (oper == '+'): result = V_complex[0] + V_complex[1] v_color = "g" elif (oper == '-'): result = V_complex[0] - V_complex[1] v_color = "y" elif (oper == '*'): result = V_complex[0] * V_complex[1] v_color = "black" elif (oper == '/'): result = V_complex[0] / V_complex[1] v_color = "magenta" result = complex(round(result.real, 2), round(result.imag, 2)) ax.arrow(0, 0, result.real, result.imag, head_width=0.25, head_length=0.5, color=v_color, length_includes_head=True) if abs(result.real) > XLIM: XLIM = round(abs(result.real) + 1) if abs(result.imag) > YLIM: YLIM = round(abs(result.imag) + 1) change_lims() with output: print(V_complex[0], oper, V_complex[1], "=", result) def on_button_add_clicked(b): perform_operation("+") def on_button_substract_clicked(b): perform_operation("-") def on_button_multiply_clicked(b): perform_operation("*") def on_button_divide_clicked(b): perform_operation("/") # Plot init methods def on_button_reset_clicked(b): global V, V_complex, XLIM, YLIM with output: output.clear_output() clear_plot() vectors_index_first = False; V = [None, None] V_complex = [None, None] a1.value = 0 b1.value = 0 a2.value = 0 b2.value = 0 XLIM = 5 YLIM = 5 change_lims() def clear_plot(): plt.cla() set_ticks() ax.set_xlabel('Re') ax.set_ylabel('Im') plt.ylim([-YLIM, 
YLIM]) plt.xlim([-XLIM, XLIM]) plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch]) def change_lims(): set_ticks() plt.ylim([-YLIM, YLIM]) plt.xlim([-XLIM, XLIM]) set_ticks() def max_bound(): global XLIM, YLIM mx = 0 my = 0 if V_complex[0] != None: z = V_complex[0] if abs(z.real) > mx: mx = abs(z.real) if abs(z.imag) > my: my = abs(z.imag) if V_complex[1] != None: z = V_complex[1] if abs(z.real) > mx: mx = abs(z.real) if abs(z.imag) > my: my = abs(z.imag) if mx > XLIM: XLIM = round(mx + 1) elif mx <=5: XLIM = 5 if my > YLIM: YLIM = round(my + 1) elif my <=5: YLIM = 5 change_lims() # Button events button_set_z1.on_click(on_button_set_z1_clicked) button_set_z2.on_click(on_button_set_z2_clicked) button_add.on_click(on_button_add_clicked) button_substract.on_click(on_button_substract_clicked) button_multiply.on_click(on_button_multiply_clicked) button_divide.on_click(on_button_divide_clicked) button_reset.on_click(on_button_reset_clicked) ```
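The four operations wired to the buttons above ultimately rely on Python's built-in `complex` arithmetic, which you can check directly; the sample values below are chosen only for illustration.

```
# Python's complex type performs the same operations the widget buttons trigger.
z1 = complex(3, 4)   # 3 + 4j
z2 = complex(1, -2)  # 1 - 2j

print(z1 + z2)  # (4+2j)
print(z1 - z2)  # (2+6j)
print(z1 * z2)  # (11-2j)
print(z1 / z2)  # (-1+2j)
```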
github_jupyter
0.305076
0.741721
``` import numpy as np import matplotlib.dates as dts import pandas as pd import fredpy as fp import datetime,dateutil,urllib,runProcs import requests import matplotlib.pyplot as plt plt.style.use('classic') %matplotlib inline # 1. Import the most recent inflation forecast data from the Philadelphia Fed, Survey of Professional Forecasters # Get data here: https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/inflation-forecasts url = "https://www.philadelphiafed.org/-/media/frbp/assets/surveys-and-data/survey-of-professional-forecasters/historical-data/inflation.xlsx?la=en&hash=F9C3E76769B4586C3E36E403DFA54BDC" r = requests.get(url,verify=False) with open("../xls/inflation_forecasts.xls", "wb") as code: code.write(r.content) # dls = "http://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters/historical-data/inflation.xls" # urllib.urlretrieve(dls, "inflation_forecasts.xls") # 2. Download and manage data from FRED gdp_deflator_Q=fp.series('GDPDEF') gdp_deflator_A=fp.series('A191RD3A086NBEA') gdp_deflator_Q = gdp_deflator_Q.apc(method='forward') gdp_deflator_A = gdp_deflator_A.apc(method='forward') gdp_deflator_Q = gdp_deflator_Q.window(['07-01-1970','01-01-2200']) gdp_deflator_A = gdp_deflator_A.window(['07-01-1970','01-01-2200']) interest_Q = fp.series('GS1') interest_A = fp.series('GS1') interest_Q = interest_Q.as_frequency(freq='Q',method='mean') interest_A = interest_A.as_frequency(freq='A',method='mean') interest_Q = interest_Q.window(['07-01-1970','01-01-2200']) interest_A = interest_A.window(['07-01-1970','01-01-2200']) # 3. Create forecast series as FRED objects # 3.1 import the inflation forecasts from Excel file and fill in missing value for 1974:Q3 inflation_forecasts = pd.read_excel('../xls/inflation_forecasts.xls') inflation_forecasts['INFPGDP1YR']=inflation_forecasts['INFPGDP1YR'].interpolate() # 3.2 initialize some FRED objects # gdp_deflator_forecast_Q=fp.series('GDPDEF') # gdp_deflator_forecast_A=fp.series('GDPDEF') # 3.3 Associate forecasts with dates. The date should coincide with the start of the period for which the forecast applies. 
dates = [] for i,ind in enumerate(inflation_forecasts.index): year =int(inflation_forecasts.iloc[i]['YEAR']) quart=int(inflation_forecasts.iloc[i]['QUARTER']) if quart == 1: month = '04' elif quart == 2: month = '07' elif quart == 3: month = '10' else: month = '01' year=year+1 date = month+'-01-'+str(year) dates.append(date) # dateNumbers = [dateutil.parser.parse(s) for s in dates] # # 3.4 Create the FRED objects gdp_deflator_forecast_Q = fp.to_fred_series(data = inflation_forecasts['INFPGDP1YR'].values,dates=dates,frequency_short='Q') gdp_deflator_forecast_A = fp.to_fred_series(data = inflation_forecasts['INFPGDP1YR'].values,dates=dates,frequency_short='A') gdp_deflator_forecast_A = gdp_deflator_forecast_A.as_frequency(freq='A',method='mean') # 3.5 Create data frames with forecast inflation, actual inflation, and the 1-year bond rate gdp_deflator_Q,gdp_deflator_forecast_Q,interest_Q = fp.window_equalize([gdp_deflator_Q,gdp_deflator_forecast_Q,interest_Q]) gdp_deflator_A,gdp_deflator_forecast_A,interest_A = fp.window_equalize([gdp_deflator_A,gdp_deflator_forecast_A,interest_A]) inflation_forecast_Q_df=pd.DataFrame({'1-year inflation forecast':gdp_deflator_forecast_Q.data,'1-year actual inflation':gdp_deflator_Q.data,'1-year nominal interest rate':interest_Q.data}) inflation_forecast_A_df=pd.DataFrame({'1-year inflation forecast':gdp_deflator_forecast_A.data,'1-year actual inflation':gdp_deflator_A.data,'1-year nominal interest rate':interest_A.data}) # 3.6 Save data to csv inflation_forecast_Q_df.to_csv('../csv/inflation_forecastsQ.csv',index=True,index_label='date') inflation_forecast_A_df.to_csv('../csv/inflation_forecastsA.csv',index=True,index_label='date') # 4. Plot some things # 4.1 actual inflation, expected inflation, 1-year interest rate: quarterly fig=plt.figure() ax=fig.add_subplot(1,1,1) ax.plot(gdp_deflator_Q.data,'b-',lw=3) ax.plot(gdp_deflator_forecast_Q.data,'r--',lw=3) ax.plot(interest_Q.data,'m-.',lw=3) ax.set_title('Quarterly') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['actual $\pi$','forecast $\pi$','interest'],loc='upper right') # interest_Q.recessions() plt.grid() # 4.2 actual inflation, expected inflation, 1-year interest rate: annual fig=plt.figure() ax=fig.add_subplot(1,1,1) ax.plot(gdp_deflator_A.data,'b-o',lw=3) ax.plot(gdp_deflator_forecast_A.data,'r--o',lw=3) ax.plot(interest_A.data,'m-.o',lw=3) ax.set_title('Annual') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['actual $\pi$','forecast $\pi$','interest'],loc='upper right') # interest_A.recessions() plt.grid() # 5. 
Real interest rates # 5.1 Construct real interest rate series: ex ante and ex post real_ex_ante_A = interest_A.data - gdp_deflator_forecast_A.data real_ex_post_A = interest_A.data - gdp_deflator_A.data real_ex_ante_Q = interest_Q.data - gdp_deflator_forecast_Q.data real_ex_post_Q = interest_Q.data - gdp_deflator_Q.data # 5.2 ex ante and ex post real interest rates: annual fig=plt.figure() ax=fig.add_subplot(1,1,1) ax.plot(real_ex_ante_A,'b-o',lw=3) ax.plot(real_ex_post_A,'r--o',lw=3) ax.set_title('Annual real interest rate') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['ex ante','ex post'],loc='upper right') # interest_A.recessions() plt.grid() # 5.2 ex ante and ex post real interest rates: quarterly fig=plt.figure() ax=fig.add_subplot(1,1,1) ax.plot(real_ex_ante_Q,'b-',lw=3) ax.plot(real_ex_post_Q,'r--',lw=3) ax.set_title('Quarterly real interest rate') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['ex ante','ex post'],loc='upper right') # interest_Q.recessions() plt.grid() # # 6. Consumption Euler equation # 6.1 create the consumption series cons=fp.series('PCECA') defl=fp.series('A191RD3A086NBEA') cons,defl = fp.window_equalize([cons,defl]) cons = cons.pc(method='backward') interest_A,cons = fp.window_equalize([interest_A,cons]) # 6.2 Predicted real interest rate: sigma = 1 sigma = 1 beta = .98 gc=np.mean(cons.data) r_pred_A = sigma*np.array(cons.data - np.mean(cons.data)) - 100*np.log(beta) print(gc) r_pred_A # 6.3 Plot the predicted real interest rate fig=plt.figure() ax=fig.add_subplot(1,1,1) ax.plot(real_ex_ante_A,'b-',lw=3) ax.plot(real_ex_ante_A.index,r_pred_A,'r--',lw=3) ax.set_title('Annual ex ante real interest rate') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['actual','predicted'],loc='upper right') # interest_A.recessions() plt.grid() np.corrcoef(cons.data, real_ex_ante_A) # 7. Export to notebook to .py runProcs.exportNb('inflation_forecasts') ```
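The two real-rate definitions used in section 5 reduce to simple subtraction, which a toy calculation makes explicit; the numbers below are made up and are not taken from the SPF or FRED data.

```
# Fisher-equation arithmetic behind the ex ante / ex post distinction above.
nominal_rate = 5.0        # 1-year nominal interest rate, percent
inflation_forecast = 3.0  # 1-year-ahead inflation forecast, percent
inflation_actual = 4.0    # realized inflation over the same year, percent

real_ex_ante = nominal_rate - inflation_forecast  # 2.0: uses the forecast
real_ex_post = nominal_rate - inflation_actual    # 1.0: uses realized inflation
print(real_ex_ante, real_ex_post)
```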
github_jupyter
0.497559
0.590986
<img style="float: right; margin: 0px 0px 0px 0px;" src="https://www.curiosite.es/img/auto_catalogo/w750/1702.jpg" width="300px" height="200px" /> # **Proyecto modulo 2** ## **1.1 Máquina tragamonedas** ## **1.2 Objetivos ** ### **1.2.1 Objetivo general** - Utilizar numeros *pseudoaleatorios* para crear un juego de casino. ### **1.2.2 Objetivos específicos** - Demostrar la aplicación de los numeros pseudoaleatorios en la vida real. - Simular un juego común en los casino. - Generar una interacción entre usuario-máquina. ## **1.3 Modelo que representa el problema ** ### **Nuestra máquina** Las máquinas tragamonedas son máquinas que a cambio de una cantidad de dinero apostado dan un tiempo de juego y eventualmente un premio en efectivo. Son uno de los juegos electrónicos para ganar dinero, más conocidos y más utilizados desde mediados del siglo pasado. Estas máquinas pueden ser de dos tipos: * **Programadas:** en estas máquinas el premio depende de un programa interno en la máquina, de tal forma que al cabo de una secuencia de jugadas la máquina ha de devolver una cantidad determinada de lo que se ha metido en ella. Este tipo de máquinas son habituales de los salones de juego y en algunos países también en bares o cafeterías.**<font color=green>Usaremos este tipo** * **De azar:** en estas máquinas los premios dependen exclusivamente del azar. Para conocer el porcentaje de pago de estas máquinas hay que acudir a la estadística y la probabilidad. Sólo se suelen encontrar en salones de juego de los casinos. ### **Aplicación de números aleatorios** Los Generadores de Números Aleatorios (GNAs) llamados también RNG de su abraviatura del inglés (Random Number Generator) son cruciales para la industria del juego online. De hecho, los RNG son el latido de corazón de todo el negocio y sin ellos los casinos en línea simplemente no serían justos o lugares divertidos para jugar. En lugares presenciales, los crupieres aleatorizan las cartas barajándolas. Esto se puede hacer usando una sola baraja después de cada mano o, para juegos como el blackjack, cargando el zapato de casino con hasta ocho barajas de cartas mezcladas previamente. En casinos con generador de números aleatorios, el proceso de barajar es reemplazado por una formula matemática para garantizar que cada carta repartida, dado tirado o rueda girada, sean completamente aleatorios y, por lo tanto, impredecibles. Tradicionalmente, si necesitabas un número aleatorio, utilizarías un método similar al que ves en un casino físico: barajar cartas, lanzar dados o incluso lanzar monedas. Las loterías y los juegos de bingo son esencialmente juegos de RNG también. Como la necesidad de cantidades mayores de números aleatorios se volvió más urgente en las estadísticas, se desarrollaron RNG computarizados. Los casinos RNG usan lo que se conoce como generador de números pseudoaleatorios (PRNG). El algoritmo puede crear largas cadenas de números aleatorios automáticamente, pero todas las series están determinadas por un número fijo llamado semilla. Al manipular la semilla, los desarrolladores pueden controlar el retorno al jugador en los juegos de casino. <img style="float: margin: 0px 0px 0px 0px;" src="http://www.mejorcasino.org/files/infografia-rng.jpg" width="800px" height="1000 px" /> Como puedes ver en la imagen de arriba, todo este proceso de creación de números aleatorios para generar resultados justos es bastante complejo. 
Achieving true randomness is very difficult, but with modern technology and computer systems, mathematicians and statisticians have come close enough that we can enjoy casino games without worry. ## **1.4 Simulations** ``` import random import time import os time.sleep(10) # Constants: INIT_STAKE = 50 INIT_BALANCE = 1000 ITEMS = ["CHERRY", "LEMON", "ORANGE", "PLUM", "BELL", "BAR", "7"] firstWheel = None secondWheel = None thirdWheel = None stake = INIT_STAKE balance = INIT_BALANCE def play(): global stake, firstWheel, secondWheel, thirdWheel playQuestion = askPlayer() while(stake != 0 and playQuestion == True): firstWheel = spinWheel() secondWheel = spinWheel() thirdWheel = spinWheel() printScore() playQuestion = askPlayer() def askPlayer(): ''' Asks the player whether they want to play again, expecting an answer of yes, y, no or n. The answer is not case sensitive. ''' global stake global balance while(True): os.system('cls' if os.name == 'nt' else 'clear') if (balance <= 1): print("Resetting the machine.") balance = 1000 print("The jackpot is currently: $" + str(balance) + ".") answer = input("Would you like to play? Or check your money? ") answer = answer.lower() if(answer == "yes" or answer == "y"): return True elif(answer == "no" or answer == "n"): print("You finished the game with $" + str(stake) + " in hand. Great job!") time.sleep(5) return False elif(answer == "check"): print("You currently have $" + str(stake) + ".") else: print("Whoops! I didn't understand that.") def spinWheel(): ''' returns a random item from the wheel ''' randomNumber = random.randint(0, len(ITEMS) - 1) return ITEMS[randomNumber] def printScore(): ''' prints the current score ''' global stake, firstWheel, secondWheel, thirdWheel, balance if((firstWheel == "CHERRY") and (secondWheel != "CHERRY")): win = 2 balance = balance - 2 elif((firstWheel == "CHERRY") and (secondWheel == "CHERRY") and (thirdWheel != "CHERRY")): win = 5 balance = balance - 5 elif((firstWheel == "CHERRY") and (secondWheel == "CHERRY") and (thirdWheel == "CHERRY")): win = 7 balance = balance - 7 elif((firstWheel == "ORANGE") and (secondWheel == "ORANGE") and ((thirdWheel == "ORANGE") or (thirdWheel == "BAR"))): win = 10 balance = balance - 10 elif((firstWheel == "PLUM") and (secondWheel == "PLUM") and ((thirdWheel == "PLUM") or (thirdWheel == "BAR"))): win = 14 balance = balance - 14 elif((firstWheel == "BELL") and (secondWheel == "BELL") and ((thirdWheel == "BELL") or (thirdWheel == "BAR"))): win = 20 balance = balance - 20 elif((firstWheel == "BAR") and (secondWheel == "BAR") and (thirdWheel == "BAR")): win = 250 balance = balance - 250 elif((firstWheel == "7") and (secondWheel == "7") and (thirdWheel == "7")): win = balance balance = balance - win else: win = -1 balance = balance + 1 stake += win if win == balance: print("You won the JACKPOT!!") if(win > 0): print(firstWheel + '\t' + secondWheel + '\t' + thirdWheel + ' -- You won $' + str(win)) time.sleep(3) os.system('cls' if os.name == 'nt' else 'clear') else: print(firstWheel + '\t' + secondWheel + '\t' + thirdWheel + ' -- You lost') time.sleep(2) os.system('cls' if os.name == 'nt' else 'clear') ``` ## **1.5 Visualization of simulation results** ``` print() print('''Welcome to the slot machine You will start with $50 pesos. You will be asked whether you want to play. Answer with yes / no. 
You can also use y / n. It is not case sensitive, so type it however you like! To win you must get one of the following combinations: BAR\tBAR\tBAR\t\tpays\t$250 BELL\tBELL\tBELL/BAR\tpays\t$20 PLUM\tPLUM\tPLUM/BAR\tpays\t$14 ORANGE\tORANGE\tORANGE/BAR\tpays\t$10 CHERRY\tCHERRY\tCHERRY\t\tpays\t$7 CHERRY\tCHERRY\t -\t\tpays\t$5 CHERRY\t -\t -\t\tpays\t$2 7\t 7\t 7\t\tpays\t The Jackpot! ''') play() ``` ## **1.6 Conclusions** * As we could observe, pseudorandom numbers have many uses in real life and are widely used in games of chance. * Casino slot machines use more advanced programs to carry out a similar procedure. * We managed to generate interaction between the user and the machine in order to run the game. ## **1.7 References:** - http://www.notiserver.com/3086--Que-son-las-Maquinas-Tragamonedas-y-como-ganar-dinero-en-ellas- - https://es.wikipedia.org/wiki/M%C3%A1quinas_tragamonedas - http://www.mejorcasino.org/guia/rng/
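The role of the seed described in section 1.3 can be seen directly with Python's own PRNG; this small sketch is separate from the game code above and simply shows that the same seed reproduces the same sequence of draws.

```
import random

random.seed(42)
first_run = [random.randint(0, 6) for _ in range(5)]

random.seed(42)  # re-seeding restarts the generator at the same internal state
second_run = [random.randint(0, 6) for _ in range(5)]

print(first_run == second_run)  # True: identical draws from the same seed
```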
github_jupyter
0.189146
0.928279
# Analyzing New York City taxi data using big data tools

At 10.5, ArcGIS Enterprise introduces [ArcGIS GeoAnalytics Server](http://server.arcgis.com/en/server/latest/get-started/windows/what-is-arcgis-geoanalytics-server-.htm) which provides you the ability to perform big data analysis on your infrastructure. This sample demonstrates the steps involved in performing an aggregation analysis on New York City taxi point data using ArcGIS API for Python.

The data used in this sample can be downloaded from [NYC Taxi & Limousine Commission website](http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml). For this sample, data for the months January & February of 2015 were used, each averaging 12 million records.

**Note**: The ability to perform big data analysis is only available on ArcGIS Enterprise 10.5 licensed with a GeoAnalytics server and not yet available on ArcGIS Online.

## The NYC taxi data

To give you an overview, let us take a look at a subset with 2000 points published as a feature service.

```
import arcgis
from arcgis.gis import GIS

ago_gis = GIS() # Connect to ArcGIS Online as an anonymous user
search_subset = ago_gis.content.search("NYC_taxi_subset", item_type = "Feature Layer")
subset_item = search_subset[0]
subset_item
```

Let us bring up a map to display the data.

```
subset_map = ago_gis.map("New York, NY", zoomlevel=11)
subset_map

subset_map.add_layer(subset_item)
```

Let us access the feature layers and their attribute table to understand the structure of our data. In the cell below, we are using the `query()` method to get the attribute information. The `query()` method returns a `FeatureSet` object which can be considered as a collection of individual `Feature` objects.

You can mine through the `FeatureSet`, get individual `Feature`s and read their attribute information to compose a table of all features and their attributes. However, the `FeatureSet` object provides a much easier way to get that information. Using the `df` property of a `FeatureSet`, you can read the attribute information as a `pandas` dataframe object.

To run this cell, you need to have the `pandas` Python package installed. If you get an error that pandas cannot be found, you can install it by typing the following in your terminal that is running the jupyter notebook.

    conda install pandas

```
subset_feature_layer = subset_item.layers[0]

# query the attribute information. Limit to first 5 rows.
query_result = subset_feature_layer.query(where = 'OBJECTID < 5',
                                          out_fields = "*", 
                                          returnGeometry = False)

att_data_frame = query_result.df # get as a Pandas dataframe
att_data_frame
```

The table above represents the attribute information available from the NYC dataset. Columns like pickup, dropoff locations, fare, tips, toll, trip distance provide a wealth of information allowing many interesting patterns to be observed. Our full dataset contains over 24 million points. To discern patterns out of it, let us aggregate the points into square blocks of 1 Kilometer length.

## Searching for big data file shares

To process the csv data you have downloaded using GeoAnalytics Server, you need to register the data with your GeoAnalytics Server. In this sample the data is in multiple csv files, which will be registered as a big data file share.

Let us connect to an ArcGIS Enterprise.

```
gis = GIS("https://yourportal.domain.com/webcontext", "username", "password")
```

Ensure that GeoAnalytics is supported with our GIS.
``` arcgis.geoanalytics.is_supported() ``` Get the geoanalytics datastores and search it for the registered datasets: ``` datastores = arcgis.geoanalytics.get_datastores() bigdata_fileshares = datastores.search() bigdata_fileshares ``` NYC_taxi data is registered as a `big data file share` with the Geoanalytics datastore, so we can reference it: ``` data_item = bigdata_fileshares[5] ``` ## Registering big data file shares The code below shows how a big data file share can be registered with the geoanalytics datastores, in case it's not already registered. ``` data_item = datastores.add_bigdata("NYCdata", r"\\teton\atma_shared\datasets\NYC_taxi") ``` Once a big data file share is created, the GeoAnalytics server processes all the valid file types to discern the schema of the data. This process can take a few minutes depending on the size of your data. Once processed, querying the `manifest` property returns the schema. As you can see from below, the schema is similar to the subset we observed earlier in this sample. ``` data_item.manifest ``` ## Performing data aggregation When you add a big data file share datastore, a corresponding item gets created on your portal. You can search for it like a regular item and query its layers. ``` search_result = gis.content.search("", item_type = "big data file share") search_result data_item = search_result[5] data_item data_item.layers year_2015 = data_item.layers[0] year_2015 ``` ### Aggregate points tool The `aggregate_points()` tool can be accessed through the `tools.bigdata` property of your GIS. In this example, we are using this tool to aggregate the numerous points into 1 Kilometer square blocks. The tool creates a feature layer as an output which can be accessed once the processing is complete. ``` from arcgis.geoanalytics.summarize_data import aggregate_points arcgis.env.process_spatial_reference=3857 agg_result = aggregate_points(year_2015, bin_size=1, bin_size_unit='Kilometers') ``` ### Inspect the results Let us create a map and load the processed result which is a feature layer item. ``` processed_map = gis.map('New York, NY', 11) processed_map processed_map.add_layer(agg_result) ``` Let us inspect the analysis result using smart mapping. To learn more about this visualization capability, refer to the guide on [Smart Mapping](https://developers.arcgis.com/python/guide/smart-mapping/) under the 'Mapping and Visualization' section. ``` map2 = gis.map("New York, NY", 11) map2 map2.add_layer(agg_result, { "renderer":"ClassedColorRenderer", "field_name":"MAX_tip_amount", "normalizationField":'MAX_trip_distance', "classificationMethod":'natural-breaks', "opacity":0.75 }) ``` We can now start seeing patterns, such as which pickup areas resulted in higher tips for the cab drivers.
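As a quick sanity check of the aggregation idea, the same 1-kilometer binning can be approximated locally on the small 2,000-point subset with `pandas` and `numpy`, without involving the GeoAnalytics server. This is only an illustrative sketch: the coordinate field names (`pickup_longitude`, `pickup_latitude`) are assumptions based on the NYC TLC schema and may differ in the published feature layer, and the degree-to-kilometer conversion is a rough approximation near New York's latitude.

```
import numpy as np

# Pull the full subset as a dataframe (no geometry needed for this rough check).
subset_df = subset_feature_layer.query(where='1=1', out_fields='*',
                                       returnGeometry=False).df

# Approximate kilometers per degree near ~40.75 N, then floor to 1 km bins.
km_per_deg_lat = 110.574
km_per_deg_lon = 111.320 * np.cos(np.deg2rad(40.75))

subset_df['bin_x'] = np.floor(subset_df['pickup_longitude'] * km_per_deg_lon).astype(int)
subset_df['bin_y'] = np.floor(subset_df['pickup_latitude'] * km_per_deg_lat).astype(int)

# Count pickups per 1 km x 1 km bin; the busiest bins should roughly match the
# darker squares rendered by aggregate_points on the full dataset.
bin_counts = subset_df.groupby(['bin_x', 'bin_y']).size().sort_values(ascending=False)
bin_counts.head()
```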
github_jupyter
import arcgis from arcgis.gis import GIS ago_gis = GIS() # Connect to ArcGIS Online as an anonymous user search_subset = ago_gis.content.search("NYC_taxi_subset", item_type = "Feature Layer") subset_item = search_subset[0] subset_item subset_map = ago_gis.map("New York, NY", zoomlevel=11) subset_map subset_map.add_layer(subset_item) subset_feature_layer = subset_item.layers[0] # query the attribute information. Limit to first 5 rows. query_result = subset_feature_layer.query(where = 'OBJECTID < 5', out_fields = "*", returnGeometry = False) att_data_frame = query_result.df # get as a Pandas dataframe att_data_frame gis = GIS("https://yourportal.domain.com/webcontext", "username", "password") arcgis.geoanalytics.is_supported() datastores = arcgis.geoanalytics.get_datastores() bigdata_fileshares = datastores.search() bigdata_fileshares data_item = bigdata_fileshares[5] data_item = datastores.add_bigdata("NYCdata", r"\\teton\atma_shared\datasets\NYC_taxi") data_item.manifest search_result = gis.content.search("", item_type = "big data file share") search_result data_item = search_result[5] data_item data_item.layers year_2015 = data_item.layers[0] year_2015 from arcgis.geoanalytics.summarize_data import aggregate_points arcgis.env.process_spatial_reference=3857 agg_result = aggregate_points(year_2015, bin_size=1, bin_size_unit='Kilometers') processed_map = gis.map('New York, NY', 11) processed_map processed_map.add_layer(agg_result) map2 = gis.map("New York, NY", 11) map2 map2.add_layer(agg_result, { "renderer":"ClassedColorRenderer", "field_name":"MAX_tip_amount", "normalizationField":'MAX_trip_distance', "classificationMethod":'natural-breaks', "opacity":0.75 })
0.579995
0.991732
# Analyzing the `lein-topology` function dependency network ## Automating recommendations to improve the software architecture ``` from py2cytoscape.data.cynetwork import CyNetwork from py2cytoscape.data.cyrest_client import CyRestClient from py2cytoscape.data.style import StyleUtil import sand import matplotlib.pyplot as plt %matplotlib notebook ``` ### `graph.from_edges` with a list of dictionaries Use `graph.from_edges` with an adjacency list consisting of two vertex names and an edge weight represented as a List of Dictionaries. ``` network_name = "5b2a6e3" network_collection = "lein-topology" data_path = "./data/" + network_collection + "-" + network_name edgelist_file = data_path + ".csv" edgelist_data = sand.csv_to_dicts(edgelist_file,header=['source', 'target', 'weight']) edgelist_data[:5] g = sand.from_edges(edgelist_data) g.summary() ``` ## Is the graph simple? A graph is simple if it does not have multiple edges between vertices and has no loops, i.e. an edge with the same source and target vertex. A graph that isn't simple can cause problems for some network analytics algorithms. ``` g.is_simple() ``` ## Initial Clustering based on Namespace Groups represent modules or communities in the network. Groups are based on the labels by default. ``` g.vs['group'][:5] ``` The vertices in the `lein topology` data set contain fully-qualified namespaces for functions. Grouping by name isn't particularly useful here: ``` len(set(g.vs['group'])) len(g.vs) ``` Because `sandbook` was build specifically for analyzing software and system networks, a `fqn_to_groups` grouping function is built in: ``` g.vs['group'] = sand.fqn_to_groups(g.vs['label']) len(set(g.vs['group'])) ``` This is a much more managable number of groups. Namespaces may also be useful as a separate attribute on the vertices: ``` g.vs['namespace'] = sand.namespaces(g.vs['label']) ``` ## Use degree centrality to identify candidates to filter from the analysis Outdegree represents the vertices that have the most dependencies, i.e. call the most number of functions. These functions could potentially be split into smaller, more cohesive functions. Indegree represents the vertices are depended on the most...changing them will have the most impact on other parts of the system. ``` from sand.degree import degree_distribution_count degree, count = degree_distribution_count(g) plt.title("Degree Distribution") plt.ylabel("count") plt.xlabel("degree") infig, = plt.loglog(degree, count, 'b-', marker='o') g.vs['outdegree'][:5] g.vs['indegree'][:5] ``` Which vertices have a degree of more than some majority percentage of the maxdegree? ``` g.maxdegree(mode='IN') g.maxdegree(mode='OUT') score = g.maxdegree() * .80 ``` ### These functions call the highest number of others and could potentially be split: ``` [v['name'] for v in g.vs.select(lambda vertex: vertex['outdegree'] >= 10)] ``` ### Changing these functions will have the most impact on other parts of the system: ``` [v['name'] for v in g.vs.select(lambda vertex: vertex['indegree'] >= 4)] ``` In this case, `clojure.core` and `clojure.test` namespaces have the most dependencies...unsurprising, given these are the foundational libraries of the language! These are good candidates to filter out of a visualization, since they often don't add deep insight to the aspects of the design specific to the program. ## Extract the subgraph of local namespaces based on our filter criteria There are some analyses where it will be useful to see all the vertices. 
For the high-level architecture diagram, we can focus on the functions local to the library's namespaces. We'll also keep functions that have side-effects to see if these are isolated to only a few key parts of the program: ``` # List all patterns of vertex names that we want to keep: names_to_keep = ('topology', 'clojure.core/*err*', 'clojure.core/println', 'clojure.zip', 'clojure.java.io', 'clojure.tools.namespace', 'leiningen.core.eval', 'clojure.repl') lv = g.vs(lambda v: any(match in v['label'] for match in names_to_keep)) lg = g.subgraph(lv) # Recompute degree after building the local subgraph (lg): lg.vs['indegree'] = lg.degree(mode="in") lg.vs['outdegree'] = lg.degree(mode="out") lg.summary() ``` # Visualizing the network in Cytoscape ## Verify that Cytoscape is running and get the current version ``` import sand.cytoscape as sc sc.print_version() ``` ## Load the network into Cytoscape with a default layout ``` # Create py2cytoscape client cy = CyRestClient() # Optional: delete existing Cytoscape sessions. cy.session.delete() # Load the network network = cy.network.create_from_igraph(lg, name=network_name, collection=network_collection) ``` ## Layout ``` # Apply default layout cy.layout.apply(name='force-directed', network=network) ``` ## Customize the style Use one of the included themes, or build your own. ``` style = cy.style.create('Ops') style.update_defaults(sc.ops) # Map the label property in the igraph data to Cytoscape's NODE_LABEL visual property style.create_passthrough_mapping(column='label', vp='NODE_LABEL', col_type='String') ``` ### Color vertices by namespace: ``` from sand.cytoscape import colors border_colors = { 'topology.finder': colors.BRIGHT_YELLOW, 'topology.dependencies': colors.BRIGHT_ORANGE, 'topology.dependencies-test': colors.BRIGHT_ORANGE, 'topology.qualifier': colors.BRIGHT_PURPLE, 'topology.symbols': colors.BRIGHT_BLUE, 'clojure.core': colors.BRIGHT_RED, 'clojure.java.io': colors.BRIGHT_RED, 'topology.printer': colors.BRIGHT_RED, 'leiningen.topology': colors.BRIGHT_WHITE, } fill_colors = { 'topology.finder': colors.DARK_YELLOW, 'topology.dependencies': colors.DARK_ORANGE, 'topology.dependencies-test': colors.DARK_ORANGE, 'topology.qualifier': colors.DARK_PURPLE, 'topology.symbols': colors.DARK_BLUE, 'clojure.core': colors.DARK_RED, 'clojure.java.io': colors.DARK_RED, 'topology.printer': colors.DARK_RED, 'leiningen.topology': colors.DARK_WHITE, } style.create_discrete_mapping(column='namespace', col_type='String', vp='NODE_FILL_COLOR', mappings=fill_colors) style.create_discrete_mapping(column='namespace', col_type='String', vp='NODE_BORDER_PAINT', mappings=border_colors) cy.style.apply(style, network) ``` ## Load layout coordinates from a previous session ``` positions_file = data_path + "-positions.csv" sc.layout_from_positions_csv(network, positions_file, cy) ``` ## Save the updated layout coordinates if you make changes A benefit of this workflow is the ability to manually tweak the algorithmic network layout in Cytoscape. After making changes, save the coordinates for a later session: ``` sc.positions_to_csv(network=network, path=positions_file) ``` ## Generate an SVG export Position the network in Cytoscape the way you want it, then trigger this export. When iterating, run all cells above, then all cells below this point to avoid race conditions with cytoscape's renderer. 
``` # Hide all panels in the UI sc.hide_panels() # Fit to the window: cy.layout.fit(network=network) view_id = network.get_views()[0] view = network.get_view(view_id=view_id, format='view') # Zoom out slightly: view.update_network_view('NETWORK_SCALE_FACTOR', 0.65) # Shift the network to the left: view.update_network_view('NETWORK_CENTER_X_LOCATION', 150.0) from IPython.display import SVG, display svg_data = network.get_svg() display(SVG(svg_data)) # Write the svg to a file if everything looks good: with open(data_path + '.svg', 'wb') as f: f.write(svg_data) ``` ## To create an updated structure after making new commits: * Generate an updated network. * Copy the previous position file to use as a starting point for the next visualization. * Open Cytoscape or destroy existing collections if Cytoscape is already running. * Run all cells to load the visualization. * [Save the new layout if you make changes to node positions](/notebooks/architecture.ipynb#Save-the-updated-layout-coordinates-if-you-make-changes). ## Visualizing the network with a Dependency Structure Matrix (DSM) Each row shows a function's dependencies. Each column shows callers impacted by the function. ``` from bokeh.plotting import show, output_notebook from bokeh.palettes import all_palettes output_notebook() ``` ### Create a color palette We want to choose a color palette so that groups stand out. Find the number of groups that you have assigned to your vertices: ``` num_groups = max(set(lg.vs['group'])) num_groups ``` Now we need to [choose an appropriate palette](http://bokeh.pydata.org/en/latest/docs/reference/palettes.html) that accomodates this number of groups and achieves the desired visual separation. As a general rule, you'll need one of the [large palettes](http://bokeh.pydata.org/en/latest/docs/reference/palettes.html#large-palettes) for > 20 groups. Pass the name of the palette you choose to the `all_palettes` function: ``` palette = all_palettes['Category20'][num_groups + 1] ``` ### Determine the order to sort the vertex labels The matrix visualization will be rendered according to a vertex attribute used as the `sort_by` parameter in `matrix`. ``` lg.vs.attributes() ``` ### Render the matrix ``` p = sand.matrix(lg, 'indegree', "{}-{} by indegree".format(network_collection, network_name), 900, palette) show(p) p = matrix(lg, 'outdegree', "{}-{} by outdegree".format(network_collection, network_name), 900, palette) show(p) p = sand.matrix(lg, 'group', "{}-{} by group".format(network_collection, network_name), 900, palette) show(p) ``` ## Organizing the system by scoring coupling and cohesion ### Intuition Ordering by group / modules gives us a visual indication of how well the system accomplishes the design goal of loosely coupled and highly cohesive modules. We can quantify this idea. Clustering is a type of assignment problem seeking the optimal allocation of N components to M clusters. One of the prominent heuristics of system architecting is to choose modules such that they are as independent as possible...low coupling and high cohesion. 
We can objectively score these clustering algorithms using an objective function that considers both the size of the clusters ($C_i$) and the number of interactions outside the clusters ($I_0$) according to the following equation, where $\alpha = 10$, $\beta = 100$ or $\alpha = 1$, $\beta = 10$, and $M$ is the number of clusters: $Obj = \alpha \sum_{i=1}^{M}C_i^2 + \beta I_0$ _See page 25 of Design Structure Matrix Methods and Applications for more information._ Clustering objectives work against two competing forces: * We want to minimize the size of the largest modules...otherwise, we could just take the trivial result of putting everything into one module. M=1 * We want to minimize the number and/or strength of interactions among components that cross the module boundaries. As we get to more components, more and more interactions will be required to cross module boundaries. The extreme result would be M=N. The objective function could be evaluated for any number of potential designs that were manually or automatically created. This essentially provides a real-time feedback loop about the potential quality of a design. The range of the function is immediately bound by the two extremes. Your job as an architect and designer is to minimize this function. ### Scoring `lein-topology` For us to apply this scoring methodology meaningfully, we'll make a couple of simplifying assumptions: * `clojure.core` functions aren't moving to a different namespace. * tests shouldn't factor in to how the system is structured. With these, we can apply the filtering from above a bit more strictly to get an even smaller subgraph of the function call network: ``` v_to_keep = g.vs(lambda v: 'topology' in v['label'] and not 'test' in v['label']) tg = g.subgraph(v_to_keep) # Recompute degree after building the subgraph: tg.vs['indegree'] = tg.degree(mode="in") tg.vs['outdegree'] = tg.degree(mode="out") tg.summary() ``` The baseline modularity score of `lein-topology`'s core function dependency graph is: ``` from sand.modularity import objective objective(tg, tg.vs['group']) ``` Where is this on the range of possibilities? Suppose all functions were in the same namespace. We'll simulate this by setting the group membership vector to all 1's: ``` objective(tg, [1 for _ in range(len(tg.vs))]) ``` This is the degenerate case of M=1, so the objective function simply returns the square of the number of vertices: ``` len(tg.vs) * len(tg.vs) ``` The other extreme occurs when we have the extreme of M=N, or all functions in their own namespace. We can simulate this by providing a unique group membership id for each vertex: ``` objective(tg, range(len(tg.vs))) ``` Finally, we can compare our actual modularity score to a computational result. We can use Girvan-Newman edge-betweenness community detection to generate a modular design based on the network structure alone: ``` eb_membership = sand.edge_betweenness(tg, directed=True) len(set(eb_membership)) len(set(tg.vs['group'])) ``` So the edge betweenness algorithm comes up with fewer communities, i.e. namespace in this context. Let's see how the modularity score compares: ``` objective(tg, eb_membership) ``` If this score is lower than our actual baseline, than the computational community structure may represent an improvement over the current structure. Which namespaces have changed groups? We may wish to refactor the code to reflect this structure. If the edge betweenness modularity score is higher than our baseline, this fact acts as a quantitative defense of our design. 
### The novelty here is receiving an algorithmic recommendation about how to improve the organization of the code. In this case, our current score of 121 is less than the algorithmic optimum of 133, so we can conclude we have a structure with acceptable coupling and cohesion.
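For readers who want to see the objective function spelled out, the sketch below computes $Obj = \alpha \sum_{i=1}^{M}C_i^2 + \beta I_0$ directly from an igraph graph and a membership vector, using the $\alpha = 1$, $\beta = 10$ weights quoted above. It is a from-scratch illustration of the formula, not the implementation behind `sand.modularity.objective`, so its absolute numbers may differ from the scores reported earlier.

```
from collections import Counter

def clustering_objective(graph, membership, alpha=1, beta=10):
    """alpha * (sum of squared cluster sizes) + beta * (edges crossing cluster boundaries)."""
    cluster_sizes = Counter(membership)
    size_term = sum(size ** 2 for size in cluster_sizes.values())
    crossing_edges = sum(1 for e in graph.es
                         if membership[e.source] != membership[e.target])
    return alpha * size_term + beta * crossing_edges

# Compare the current namespace grouping against the edge-betweenness communities.
print(clustering_objective(tg, tg.vs['group']))
print(clustering_objective(tg, list(eb_membership)))
```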
github_jupyter
from py2cytoscape.data.cynetwork import CyNetwork from py2cytoscape.data.cyrest_client import CyRestClient from py2cytoscape.data.style import StyleUtil import sand import matplotlib.pyplot as plt %matplotlib notebook network_name = "5b2a6e3" network_collection = "lein-topology" data_path = "./data/" + network_collection + "-" + network_name edgelist_file = data_path + ".csv" edgelist_data = sand.csv_to_dicts(edgelist_file,header=['source', 'target', 'weight']) edgelist_data[:5] g = sand.from_edges(edgelist_data) g.summary() g.is_simple() g.vs['group'][:5] len(set(g.vs['group'])) len(g.vs) g.vs['group'] = sand.fqn_to_groups(g.vs['label']) len(set(g.vs['group'])) g.vs['namespace'] = sand.namespaces(g.vs['label']) from sand.degree import degree_distribution_count degree, count = degree_distribution_count(g) plt.title("Degree Distribution") plt.ylabel("count") plt.xlabel("degree") infig, = plt.loglog(degree, count, 'b-', marker='o') g.vs['outdegree'][:5] g.vs['indegree'][:5] g.maxdegree(mode='IN') g.maxdegree(mode='OUT') score = g.maxdegree() * .80 [v['name'] for v in g.vs.select(lambda vertex: vertex['outdegree'] >= 10)] [v['name'] for v in g.vs.select(lambda vertex: vertex['indegree'] >= 4)] # List all patterns of vertex names that we want to keep: names_to_keep = ('topology', 'clojure.core/*err*', 'clojure.core/println', 'clojure.zip', 'clojure.java.io', 'clojure.tools.namespace', 'leiningen.core.eval', 'clojure.repl') lv = g.vs(lambda v: any(match in v['label'] for match in names_to_keep)) lg = g.subgraph(lv) # Recompute degree after building the local subgraph (lg): lg.vs['indegree'] = lg.degree(mode="in") lg.vs['outdegree'] = lg.degree(mode="out") lg.summary() import sand.cytoscape as sc sc.print_version() # Create py2cytoscape client cy = CyRestClient() # Optional: delete existing Cytoscape sessions. 
cy.session.delete() # Load the network network = cy.network.create_from_igraph(lg, name=network_name, collection=network_collection) # Apply default layout cy.layout.apply(name='force-directed', network=network) style = cy.style.create('Ops') style.update_defaults(sc.ops) # Map the label property in the igraph data to Cytoscape's NODE_LABEL visual property style.create_passthrough_mapping(column='label', vp='NODE_LABEL', col_type='String') from sand.cytoscape import colors border_colors = { 'topology.finder': colors.BRIGHT_YELLOW, 'topology.dependencies': colors.BRIGHT_ORANGE, 'topology.dependencies-test': colors.BRIGHT_ORANGE, 'topology.qualifier': colors.BRIGHT_PURPLE, 'topology.symbols': colors.BRIGHT_BLUE, 'clojure.core': colors.BRIGHT_RED, 'clojure.java.io': colors.BRIGHT_RED, 'topology.printer': colors.BRIGHT_RED, 'leiningen.topology': colors.BRIGHT_WHITE, } fill_colors = { 'topology.finder': colors.DARK_YELLOW, 'topology.dependencies': colors.DARK_ORANGE, 'topology.dependencies-test': colors.DARK_ORANGE, 'topology.qualifier': colors.DARK_PURPLE, 'topology.symbols': colors.DARK_BLUE, 'clojure.core': colors.DARK_RED, 'clojure.java.io': colors.DARK_RED, 'topology.printer': colors.DARK_RED, 'leiningen.topology': colors.DARK_WHITE, } style.create_discrete_mapping(column='namespace', col_type='String', vp='NODE_FILL_COLOR', mappings=fill_colors) style.create_discrete_mapping(column='namespace', col_type='String', vp='NODE_BORDER_PAINT', mappings=border_colors) cy.style.apply(style, network) positions_file = data_path + "-positions.csv" sc.layout_from_positions_csv(network, positions_file, cy) sc.positions_to_csv(network=network, path=positions_file) # Hide all panels in the UI sc.hide_panels() # Fit to the window: cy.layout.fit(network=network) view_id = network.get_views()[0] view = network.get_view(view_id=view_id, format='view') # Zoom out slightly: view.update_network_view('NETWORK_SCALE_FACTOR', 0.65) # Shift the network to the left: view.update_network_view('NETWORK_CENTER_X_LOCATION', 150.0) from IPython.display import SVG, display svg_data = network.get_svg() display(SVG(svg_data)) # Write the svg to a file if everything looks good: with open(data_path + '.svg', 'wb') as f: f.write(svg_data) from bokeh.plotting import show, output_notebook from bokeh.palettes import all_palettes output_notebook() num_groups = max(set(lg.vs['group'])) num_groups palette = all_palettes['Category20'][num_groups + 1] lg.vs.attributes() p = sand.matrix(lg, 'indegree', "{}-{} by indegree".format(network_collection, network_name), 900, palette) show(p) p = matrix(lg, 'outdegree', "{}-{} by outdegree".format(network_collection, network_name), 900, palette) show(p) p = sand.matrix(lg, 'group', "{}-{} by group".format(network_collection, network_name), 900, palette) show(p) v_to_keep = g.vs(lambda v: 'topology' in v['label'] and not 'test' in v['label']) tg = g.subgraph(v_to_keep) # Recompute degree after building the subgraph: tg.vs['indegree'] = tg.degree(mode="in") tg.vs['outdegree'] = tg.degree(mode="out") tg.summary() from sand.modularity import objective objective(tg, tg.vs['group']) objective(tg, [1 for _ in range(len(tg.vs))]) len(tg.vs) * len(tg.vs) objective(tg, range(len(tg.vs))) eb_membership = sand.edge_betweenness(tg, directed=True) len(set(eb_membership)) len(set(tg.vs['group'])) objective(tg, eb_membership)
0.640411
0.981471
# Detecting and Analyzing Faces Computer vision solutions often require an artificial intelligence (AI) solution to be able to detect, analyze, or identify human faces. or example, suppose the retail company Northwind Traders has decided to implement a "smart store", in which AI services monitor the store to identify customers requiring assistance, and direct employees to help them. One way to accomplish this is to perform facial detection and analysis - in other words, determine if there are any faces in the images, and if so analyze their features. ![A robot analyzing a face](./images/face_analysis.jpg) ## Use the Face cognitive service to detect faces Suppose the smart store system that Northwind Traders wants to create needs to be able to detect customers and analyze their facial features. In Microsoft Azure, you can use **Face**, part of Azure Cognitive Services to do this. ### Create a Cognitive Services Resource Let's start by creating a **Cognitive Services** resource in your Azure subscription. > **Note**: If you already have a Cognitive Services resource, just open its **Quick start** page in the Azure portal and copy its key and endpoint to the cell below. Otherwise, follow the steps below to create one. 1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account. 2. Click the **&#65291;Create a resource** button, search for *Cognitive Services*, and create a **Cognitive Services** resource with the following settings: - **Name**: *Enter a unique name*. - **Subscription**: *Your Azure subscription*. - **Location**: *Choose any available region*: - **Pricing tier**: S0 - **Resource group**: *Create a resource group with a unique name*. 3. Wait for deployment to complete. Then go to your cognitive services resource, and on the **Overview** page, click the link to manage the keys for the service. You will need the endpoint and keys to connect to your cognitive services resource from client applications. ### Get the Key and Endpoint for your Cognitive Services resource To use your cognitive services resource, client applications need its endpoint and authentication key: 1. In the Azure portal, on the **Keys and Endpoint** page for your cognitive service resource, copy the **Key1** for your resource and paste it in the code below, replacing **YOUR_COG_KEY** (if CTRL+V doesn't paste, try SHIFT+CTRL+V). 2. Copy the **endpoint** for your resource and and paste it in the code below, replacing **YOUR_COG_ENDPOINT**. 3. Run the code in the cell below by clicking the Run Cell <span>&#9655</span> button (at the top left of the cell). ``` cog_key = 'YOUR_COG_KEY' cog_endpoint = 'YOUR_COG_ENDPOINT' print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key)) ``` To use the Face service in your Cognitive Services resource, you'll need to install the Azure Cognitive Services Face package. ``` ! pip install azure-cognitiveservices-vision-face ``` Now that you have a Cognitive Services resource and the SDK package installed, you can use the Face service to detect human faces in the store. Run the code cell below to see an example. ``` from azure.cognitiveservices.vision.face import FaceClient from msrest.authentication import CognitiveServicesCredentials from python_code import faces import os %matplotlib inline # Create a face detection client. 
face_client = FaceClient(cog_endpoint, CognitiveServicesCredentials(cog_key)) # Open an image image_path = os.path.join('data', 'face', 'store_cam2.jpg') image_stream = open(image_path, "rb") # Detect faces detected_faces = face_client.face.detect_with_stream(image=image_stream) # Display the faces (code in python_code/faces.py) faces.show_faces(image_path, detected_faces) ``` Each detected face is assigned a unique ID, so your application can identify each individual face that was detected. Run the cell below to see the IDs for some more shopper faces. ``` # Open an image image_path = os.path.join('data', 'face', 'store_cam3.jpg') image_stream = open(image_path, "rb") # Detect faces detected_faces = face_client.face.detect_with_stream(image=image_stream) # Display the faces (code in python_code/faces.py) faces.show_faces(image_path, detected_faces, show_id=True) ``` ## Analyze facial attributes Face can do much more than simply detect faces. It can also analyze facial features and expressions to suggest age and emotional state; For example, run the code below to analyze the facial attributes of a shopper. ``` # Open an image image_path = os.path.join('data', 'face', 'store_cam1.jpg') image_stream = open(image_path, "rb") # Detect faces and specified facial attributes attributes = ['age', 'emotion'] detected_faces = face_client.face.detect_with_stream(image=image_stream, return_face_attributes=attributes) # Display the faces and attributes (code in python_code/faces.py) faces.show_face_attributes(image_path, detected_faces) ``` Based on the emotion scores detected for the customer in the image, the customer seems pretty happy with the shopping experience. ## Find similar faces The face IDs that are created for each detected face are used to individually identify face detections. You can use these IDs to compare a detected face to previously detected faces and find faces with similar features. For example, run the cell below to compare the shopper in one image with shoppers in another, and find a matching face. ``` # Get the ID of the first face in image 1 image_1_path = os.path.join('data', 'face', 'store_cam3.jpg') image_1_stream = open(image_1_path, "rb") image_1_faces = face_client.face.detect_with_stream(image=image_1_stream) face_1 = image_1_faces[0] # Get the face IDs in a second image image_2_path = os.path.join('data', 'face', 'store_cam2.jpg') image_2_stream = open(image_2_path, "rb") image_2_faces = face_client.face.detect_with_stream(image=image_2_stream) image_2_face_ids = list(map(lambda face: face.face_id, image_2_faces)) # Find faces in image 2 that are similar to the one in image 1 similar_faces = face_client.face.find_similar(face_id=face_1.face_id, face_ids=image_2_face_ids) # Show the face in image 1, and similar faces in image 2(code in python_code/face.py) faces.show_similar_faces(image_1_path, face_1, image_2_path, image_2_faces, similar_faces) ``` ## Recognize faces So far you've seen that Face can detect faces and facial features, and can identify two faces that are similar to one another. You can take things a step further by inplementing a *facial recognition* solution in which you train Face to recognize a specific person's face. This can be useful in a variety of scenarios, such as automatically tagging photographs of friends in a social media application, or using facial recognition as part of a biometric identity verification system. 
To see how this works, let's suppose the Northwind Traders company wants to use facial recognition to ensure that only authorized employees in the IT department can access secure systems. We'll start by creating a *person group* to represent the authorized employees. ``` group_id = 'employee_group_id' try: # Delete group if it already exists face_client.person_group.delete(group_id) except Exception as ex: print(ex.message) finally: face_client.person_group.create(group_id, 'employees') print ('Group created!') ``` Now that the *person group* exists, we can add a *person* for each employee we want to include in the group, and then register multiple photographs of each person so that Face can learn the distinct facial characetristics of each person. Ideally, the images should show the same person in different poses and with different facial expressions. We'll add a single employee called Wendell, and register three photographs of the employee. ``` import matplotlib.pyplot as plt from PIL import Image import os %matplotlib inline # Add a person (Wendell) to the group wendell = face_client.person_group_person.create(group_id, 'Wendell') # Get photo's of Wendell folder = os.path.join('data', 'face', 'wendell') wendell_pics = os.listdir(folder) # Register the photos i = 0 fig = plt.figure(figsize=(8, 8)) for pic in wendell_pics: # Add each photo to person in person group img_path = os.path.join(folder, pic) img_stream = open(img_path, "rb") face_client.person_group_person.add_face_from_stream(group_id, wendell.person_id, img_stream) # Display each image img = Image.open(img_path) i +=1 a=fig.add_subplot(1,len(wendell_pics), i) a.axis('off') imgplot = plt.imshow(img) plt.show() ``` With the person added, and photographs registered, we can now train Face to recognize each person. ``` face_client.person_group.train(group_id) print('Trained!') ``` Now, with the model trained, you can use it to identify recognized faces in an image. ``` # Get the face IDs in a second image image_path = os.path.join('data', 'face', 'employees.jpg') image_stream = open(image_path, "rb") image_faces = face_client.face.detect_with_stream(image=image_stream) image_face_ids = list(map(lambda face: face.face_id, image_faces)) # Get recognized face names face_names = {} recognized_faces = face_client.face.identify(image_face_ids, group_id) for face in recognized_faces: person_name = face_client.person_group_person.get(group_id, face.candidates[0].person_id).name face_names[face.face_id] = person_name # show recognized faces faces.show_recognized_faces(image_path, image_faces, face_names) ``` ## Learn More To learn more about the Face cognitive service, see the [Face documentation](https://docs.microsoft.com/azure/cognitive-services/face/)
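As an optional refinement of the identification step shown earlier, the candidates returned by `face_client.face.identify` can be filtered by their confidence score before a name is assigned. The sketch below assumes the candidate objects expose a `confidence` attribute (as in current versions of the `azure-cognitiveservices-vision-face` package) and uses an arbitrary 0.6 threshold; tune the threshold for your own scenario.

```
# Only label a face when the top candidate is reasonably confident.
confidence_threshold = 0.6

face_names = {}
recognized_faces = face_client.face.identify(image_face_ids, group_id)
for face in recognized_faces:
    if face.candidates and face.candidates[0].confidence >= confidence_threshold:
        person = face_client.person_group_person.get(group_id, face.candidates[0].person_id)
        face_names[face.face_id] = '{} ({:.0%})'.format(person.name, face.candidates[0].confidence)
    else:
        face_names[face.face_id] = 'Unknown'

faces.show_recognized_faces(image_path, image_faces, face_names)
```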
github_jupyter
cog_key = 'YOUR_COG_KEY' cog_endpoint = 'YOUR_COG_ENDPOINT' print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key)) ! pip install azure-cognitiveservices-vision-face from azure.cognitiveservices.vision.face import FaceClient from msrest.authentication import CognitiveServicesCredentials from python_code import faces import os %matplotlib inline # Create a face detection client. face_client = FaceClient(cog_endpoint, CognitiveServicesCredentials(cog_key)) # Open an image image_path = os.path.join('data', 'face', 'store_cam2.jpg') image_stream = open(image_path, "rb") # Detect faces detected_faces = face_client.face.detect_with_stream(image=image_stream) # Display the faces (code in python_code/faces.py) faces.show_faces(image_path, detected_faces) # Open an image image_path = os.path.join('data', 'face', 'store_cam3.jpg') image_stream = open(image_path, "rb") # Detect faces detected_faces = face_client.face.detect_with_stream(image=image_stream) # Display the faces (code in python_code/faces.py) faces.show_faces(image_path, detected_faces, show_id=True) # Open an image image_path = os.path.join('data', 'face', 'store_cam1.jpg') image_stream = open(image_path, "rb") # Detect faces and specified facial attributes attributes = ['age', 'emotion'] detected_faces = face_client.face.detect_with_stream(image=image_stream, return_face_attributes=attributes) # Display the faces and attributes (code in python_code/faces.py) faces.show_face_attributes(image_path, detected_faces) # Get the ID of the first face in image 1 image_1_path = os.path.join('data', 'face', 'store_cam3.jpg') image_1_stream = open(image_1_path, "rb") image_1_faces = face_client.face.detect_with_stream(image=image_1_stream) face_1 = image_1_faces[0] # Get the face IDs in a second image image_2_path = os.path.join('data', 'face', 'store_cam2.jpg') image_2_stream = open(image_2_path, "rb") image_2_faces = face_client.face.detect_with_stream(image=image_2_stream) image_2_face_ids = list(map(lambda face: face.face_id, image_2_faces)) # Find faces in image 2 that are similar to the one in image 1 similar_faces = face_client.face.find_similar(face_id=face_1.face_id, face_ids=image_2_face_ids) # Show the face in image 1, and similar faces in image 2(code in python_code/face.py) faces.show_similar_faces(image_1_path, face_1, image_2_path, image_2_faces, similar_faces) group_id = 'employee_group_id' try: # Delete group if it already exists face_client.person_group.delete(group_id) except Exception as ex: print(ex.message) finally: face_client.person_group.create(group_id, 'employees') print ('Group created!') import matplotlib.pyplot as plt from PIL import Image import os %matplotlib inline # Add a person (Wendell) to the group wendell = face_client.person_group_person.create(group_id, 'Wendell') # Get photo's of Wendell folder = os.path.join('data', 'face', 'wendell') wendell_pics = os.listdir(folder) # Register the photos i = 0 fig = plt.figure(figsize=(8, 8)) for pic in wendell_pics: # Add each photo to person in person group img_path = os.path.join(folder, pic) img_stream = open(img_path, "rb") face_client.person_group_person.add_face_from_stream(group_id, wendell.person_id, img_stream) # Display each image img = Image.open(img_path) i +=1 a=fig.add_subplot(1,len(wendell_pics), i) a.axis('off') imgplot = plt.imshow(img) plt.show() face_client.person_group.train(group_id) print('Trained!') # Get the face IDs in a second image image_path = os.path.join('data', 'face', 'employees.jpg') image_stream = 
open(image_path, "rb") image_faces = face_client.face.detect_with_stream(image=image_stream) image_face_ids = list(map(lambda face: face.face_id, image_faces)) # Get recognized face names face_names = {} recognized_faces = face_client.face.identify(image_face_ids, group_id) for face in recognized_faces: person_name = face_client.person_group_person.get(group_id, face.candidates[0].person_id).name face_names[face.face_id] = person_name # show recognized faces faces.show_recognized_faces(image_path, image_faces, face_names)
0.549399
0.961353
<a href="https://colab.research.google.com/github/Asuskf/Curso-pandas-1.x-en-espa-ol/blob/master/1.-%20Fundamentos%20de%20Pandas/1.1%20Colab/Creando_el_primer_DataFrame_desde_un_csv.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Downloading and consuming a csv

```
# Download the dataset
! wget http://srea64.github.io/msan622/project/pokemon.csv

"""
Import the pandas library
"""
import pandas
"""
pandas: the library we just imported (no alias yet)
read_csv: pandas function for working with csv files
"""
pandas.read_csv('pokemon.csv')

"""
Import the pandas library and give it the alias pd so it can be called later
"""
import pandas as pd
"""
pd: refers to the alias
read_csv: pandas function for working with csv files
"""
df_pokemon = pd.read_csv('pokemon.csv')
df_pokemon
```

# Reading directly from the source URL

```
import pandas as pd

url = "http://srea64.github.io/msan622/project/pokemon.csv"
pd.read_csv(url)
```

# Columns with NA values

```
# Find out which columns of the dataframe have NA values
df_pokemon.isnull().sum()
```

# Column names

```
# Get the names of the columns that make up the dataframe
list(df_pokemon)
```

# Querying data by column name

```
# Extract a column's data using its name
df_pokemon['id']

# Using the column name, extract a single element
df_pokemon['Name'][1]
```

# Querying data by column position

```
# Get the column name from its position
df_pokemon.columns[1]

"""
1.- Get the column name from its position
2.- Use that name to extract the data
"""
df_pokemon[df_pokemon.columns[1]]

"""
1.- Get the column name from its position
2.- Use that name to extract the data
3.- Take the first element of the extracted data
"""
df_pokemon[df_pokemon.columns[1]][1]
```

# Extracting information based on the data in a row

```
# Extract the information of the first row (index label 0)
df_pokemon.loc[0]

# Extract rows based on a column chosen by the user, in this case 'Name'
df_pokemon.loc[df_pokemon['Name'] == 'Bulbasaur']

# Extract rows using the column position, in this case column 1 ('Name')
df_pokemon.loc[df_pokemon[df_pokemon.columns[1]] == 'Bulbasaur']
```

# Example of filtering data in a dataframe

```
"""
Another example
Extract the Pokémon whose attack is greater than or equal to 50
"""
df_pokemon.loc[df_pokemon['Attack'] >= 50]
```
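Several conditions can also be combined in a single filter. The example below assumes the csv also contains a `Defense` column (as in the standard Pokémon stats table); note that boolean masks use `&`/`|` with parentheses rather than `and`/`or`.

```
# Pokémon with Attack >= 50 AND Defense >= 50, using boolean masks
df_pokemon.loc[(df_pokemon['Attack'] >= 50) & (df_pokemon['Defense'] >= 50)]

# The same filter expressed with DataFrame.query
df_pokemon.query('Attack >= 50 and Defense >= 50')
```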
github_jupyter
# Download the dataset
! wget http://srea64.github.io/msan622/project/pokemon.csv

"""
Import the pandas library
"""
import pandas
"""
pandas: the library we just imported (no alias yet)
read_csv: pandas function for working with csv files
"""
pandas.read_csv('pokemon.csv')

"""
Import the pandas library and give it the alias pd so it can be called later
"""
import pandas as pd
"""
pd: refers to the alias
read_csv: pandas function for working with csv files
"""
df_pokemon = pd.read_csv('pokemon.csv')
df_pokemon

import pandas as pd

url = "http://srea64.github.io/msan622/project/pokemon.csv"
pd.read_csv(url)

# Find out which columns of the dataframe have NA values
df_pokemon.isnull().sum()

# Get the names of the columns that make up the dataframe
list(df_pokemon)

# Extract a column's data using its name
df_pokemon['id']

# Using the column name, extract a single element
df_pokemon['Name'][1]

# Get the column name from its position
df_pokemon.columns[1]

"""
1.- Get the column name from its position
2.- Use that name to extract the data
"""
df_pokemon[df_pokemon.columns[1]]

"""
1.- Get the column name from its position
2.- Use that name to extract the data
3.- Take the first element of the extracted data
"""
df_pokemon[df_pokemon.columns[1]][1]

# Extract the information of the first row (index label 0)
df_pokemon.loc[0]

# Extract rows based on a column chosen by the user, in this case 'Name'
df_pokemon.loc[df_pokemon['Name'] == 'Bulbasaur']

# Extract rows using the column position, in this case column 1 ('Name')
df_pokemon.loc[df_pokemon[df_pokemon.columns[1]] == 'Bulbasaur']

"""
Another example
Extract the Pokémon whose attack is greater than or equal to 50
"""
df_pokemon.loc[df_pokemon['Attack'] >= 50]
0.30013
0.902695
<a href="https://colab.research.google.com/github/vigneshwaran-dev/CV-research-timeline/blob/main/ResNet/resnet50.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D, BatchNormalization, Input, Add, ZeroPadding2D from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras.optimizers import SGD ``` Defining the Model as per the Original Paper ``` def residual_block(model, filter_map): residue = model # 1st Layer model = Conv2D(filters=filter_map[0], kernel_size=(1, 1), strides=(1, 1), padding='valid')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 2nd Layer model = Conv2D(filters=filter_map[1], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 3rd Layer model = Conv2D(filters=filter_map[2], kernel_size=(1, 1), strides=(1, 1), padding='valid')(model) model = BatchNormalization()(model) model = Add()([model, residue]) model = Activation('relu')(model) return model def residual_skip(model, filter_map, stride): residue = Conv2D(filters=filter_map[2], kernel_size=(1, 1), strides=(stride, stride), padding='valid')(model) residue = BatchNormalization()(residue) # 1st Layer model = Conv2D(filters=filter_map[0], kernel_size=(1, 1), strides=(stride, stride), padding='valid')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 2nd Layer model = Conv2D(filters=filter_map[1], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 3rd Layer model = Conv2D(filters=filter_map[2], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Add()([model, residue]) model = Activation('relu')(model) return model _input = Input(shape=(224, 224, 3)) # 1st Block model = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2))(_input) model = BatchNormalization()(model) model = Activation('relu')(model) model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model) # 2nd Block model = residual_skip(model, [64, 256, 256], 1) model = residual_block(model, [64, 256, 256]) model = residual_block(model, [64, 256, 256]) # 3rd Block model = residual_skip(model, [128, 512, 512], 2) model = residual_block(model, [128, 512, 512]) model = residual_block(model, [128, 512, 512]) model = residual_block(model, [128, 512, 512]) # 4th Block model = residual_skip(model, [256, 1024, 1024], 2) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) # 5th Block model = residual_skip(model, [512, 2048, 2048], 2) model = residual_block(model, [512, 2048, 2048]) model = residual_block(model, [512, 2048, 2048]) model = AveragePooling2D(pool_size=(2, 2), padding='same')(model) model = Flatten()(model) model = Dense(1000)(model) model = Activation('softmax')(model) model = Model(inputs=_input, outputs=model, name='ResNet50') model.summary() model.compile(loss=categorical_crossentropy, optimizer=SGD(learning_rate=0.01), metrics=['accuracy']) ``` Considering the data to be present in TRAIN_DATA_LOCATION and VALIDATION_DATA_LOCATION directories and 
running them through data generators to perform live data augmentation during the training process

```
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_dir = 'TRAIN_DATA_LOCATION'
valid_dir = 'VALIDATION_DATA_LOCATION'
BATCH_SIZE = 32

train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   shear_range=0.1,
                                   zoom_range=0.1)
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=BATCH_SIZE,
                                                    seed=1,
                                                    shuffle=True,
                                                    class_mode='categorical')

valid_datagen = ImageDataGenerator(rescale=1.0/255.0)
valid_generator = valid_datagen.flow_from_directory(valid_dir,
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=BATCH_SIZE,
                                                    seed=7,
                                                    shuffle=True,
                                                    class_mode='categorical')

train_num = train_generator.samples
valid_num = valid_generator.samples
```

Training the Model

```
import datetime

log_dir = 'logs/fit/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
callback_list = [tensorboard_callback]

model.fit(train_generator,
          epochs=1,
          steps_per_epoch=train_num // BATCH_SIZE,
          validation_data=valid_generator,
          validation_steps=valid_num // BATCH_SIZE,
          callbacks=callback_list,
          verbose=1)

model.save('resnet50.h5')
```

Visualizing the performance using Tensorboard

```
%load_ext tensorboard
%tensorboard --logdir logs/fit
```

Prediction

```
import numpy as np

x_valid, label_batch = next(iter(valid_generator))
# model is a functional Model, which has no predict_classes(); take the argmax of
# the softmax output instead.
prediction_values = np.argmax(model.predict(x_valid), axis=-1)
print(prediction_values)
```
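To use the saved model outside the notebook, it can be reloaded and applied to a single image. The sketch below is illustrative: `'resnet50.h5'` refers to the file saved above and `'sample.jpg'` is a placeholder path; the image must be preprocessed the same way as the training generators (resized to 224×224 and rescaled by 1/255).

```
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

# Reload the trained network and classify one image.
model = load_model('resnet50.h5')

img = image.load_img('sample.jpg', target_size=(224, 224))   # placeholder path
x = image.img_to_array(img) / 255.0                          # same rescaling as the generators
x = np.expand_dims(x, axis=0)                                # add batch dimension: (1, 224, 224, 3)

probs = model.predict(x)
print('Predicted class index:', np.argmax(probs, axis=-1)[0])
```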
github_jupyter
import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D, BatchNormalization, Input, Add, ZeroPadding2D from tensorflow.keras.losses import categorical_crossentropy from tensorflow.keras.optimizers import SGD def residual_block(model, filter_map): residue = model # 1st Layer model = Conv2D(filters=filter_map[0], kernel_size=(1, 1), strides=(1, 1), padding='valid')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 2nd Layer model = Conv2D(filters=filter_map[1], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 3rd Layer model = Conv2D(filters=filter_map[2], kernel_size=(1, 1), strides=(1, 1), padding='valid')(model) model = BatchNormalization()(model) model = Add()([model, residue]) model = Activation('relu')(model) return model def residual_skip(model, filter_map, stride): residue = Conv2D(filters=filter_map[2], kernel_size=(1, 1), strides=(stride, stride), padding='valid')(model) residue = BatchNormalization()(residue) # 1st Layer model = Conv2D(filters=filter_map[0], kernel_size=(1, 1), strides=(stride, stride), padding='valid')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 2nd Layer model = Conv2D(filters=filter_map[1], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Activation('relu')(model) # 3rd Layer model = Conv2D(filters=filter_map[2], kernel_size=(3, 3), strides=(1, 1), padding='same')(model) model = BatchNormalization()(model) model = Add()([model, residue]) model = Activation('relu')(model) return model _input = Input(shape=(224, 224, 3)) # 1st Block model = Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2))(_input) model = BatchNormalization()(model) model = Activation('relu')(model) model = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(model) # 2nd Block model = residual_skip(model, [64, 256, 256], 1) model = residual_block(model, [64, 256, 256]) model = residual_block(model, [64, 256, 256]) # 3rd Block model = residual_skip(model, [128, 512, 512], 2) model = residual_block(model, [128, 512, 512]) model = residual_block(model, [128, 512, 512]) model = residual_block(model, [128, 512, 512]) # 4th Block model = residual_skip(model, [256, 1024, 1024], 2) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) model = residual_block(model, [256, 1024, 1024]) # 5th Block model = residual_skip(model, [512, 2048, 2048], 2) model = residual_block(model, [512, 2048, 2048]) model = residual_block(model, [512, 2048, 2048]) model = AveragePooling2D(pool_size=(2, 2), padding='same')(model) model = Flatten()(model) model = Dense(1000)(model) model = Activation('softmax')(model) model = Model(inputs=_input, outputs=model, name='ResNet50') model.summary() model.compile(loss=categorical_crossentropy, optimizer=SGD(learning_rate=0.01), metrics=['accuracy']) from tensorflow.keras.preprocessing.image import ImageDataGenerator train_dir = 'TRAIN_DATA_LOCATION' valid_dir = 'VALIDATION_DATA_LOCATION' BATCH_SIZE = 32 train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1) train_generator = train_datagen.flow_from_directory(train_dir, target_size=(224, 224), 
color_mode='rgb', batch_size=BATCH_SIZE, seed=1, shuffle=True, class_mode='categorical') valid_datagen = ImageDataGenerator(rescale=1.0/255.0) valid_generator = valid_datagen.flow_from_directory(valid_dir, target_size=(224, 224), color_mode='rgb', batch_size=BATCH_SIZE, seed=7, shuffle=True, class_mode='categorical') train_num = train_generator.samples import datetime log_dir = 'logs/fit/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S') tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir) callback_list = [tensorboard_callback] model.fit(train_generator, epochs=1, steps_per_epoch=train_num // BATCH_SIZE, validation_data=valid_generator, validation_steps=valid_num // BATCH_SIZE, callbacks=callback_list, verbose=1) model.save('vgg19.h5') %load_ext tensorboard %tensorboard --logdir logs/fit x_valid, label_batch = next(iter(valid_generator)) prediction_values = model.predict_classes(x_valid) print(prediction_values)
0.875028
0.949389
**Notes for the docker container:**

Docker command to run this notebook locally:

note: replace `<path to my directory>` with the path of the directory you want to map to `/datos` inside the docker container.

```
docker run --rm -v <path to my directory>:/datos --name jupyterlab_local -p 8888:8888 -d palmoreck/jupyterlab:1.1.0
```

password for jupyterlab: `qwerty`

Stop the docker container:

```
docker stop jupyterlab_local
```

Documentation for the docker image `palmoreck/jupyterlab:1.1.0` is at this [link](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab).

---

Note generated from this [link](https://www.dropbox.com/s/5bc6tn39o0qqg35/1.3.Condicion_estabilidad_y_normas.pdf?dl=0)

**The following cell shows how to use the `%pip` magic command to install packages from jupyterlab.** See this [link](https://ipython.readthedocs.io/en/stable/interactive/magics.html#built-in-magic-commands) for magic commands.

```
%pip install -q --user numpy matplotlib scipy
```

The following cell will restart the **IPython** kernel so the packages installed in the previous cell are loaded. Click **Ok** on the message that appears and continue with the contents of the notebook.

```
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
```

# 1.3 Condition of a problem and stability of an algorithm

Two fundamental topics in numerical analysis are the **condition of a problem** and the **stability of an algorithm**. Conditioning concerns the behavior of a problem under perturbations, and stability concerns the behavior of an algorithm (used to solve a problem) under perturbations.

The accuracy of a computation ultimately depends on a combination of these two notions:

<p style="text-align: center;">Accuracy = Conditioning + Stability</p>

A lack of accuracy therefore arises from ill conditioned problems (regardless of whether the algorithms used are stable or unstable) and from unstable algorithms (regardless of whether the problems are well or ill conditioned).

## Perturbations

The condition of a problem and the stability of an algorithm both refer to the term **perturbation**, which leads us to think of "small" or "large" perturbations. To measure this we use the concept of a **norm**. See the end of this note for the definition of a norm and its properties.

## Condition of a problem

Think of a problem as a function $f: \mathbb{X} \rightarrow \mathbb{Y}$ where $\mathbb{X}$ is a vector space with a defined norm and $\mathbb{Y}$ is another vector space of solutions with a defined norm. We call the combination of $x$ and $f$ an instance of the problem, and we are interested in the behavior of $f$ at $x$. We use the name "problem" to refer to an instance of the problem.

A well conditioned problem (instance) has the property that all small perturbations of $x$ lead to small changes in $f(x)$. It is ill conditioned if small perturbations of $x$ lead to large changes in $f(x)$. What counts as "small" or "large" depends on the problem itself.

Let $\hat{x} = x + \Delta x$ with $\Delta x$ a small perturbation of $x$. The **relative condition number of the problem $f$ at $x$** is:

$$\text{Cond}_f^R = \frac{\text{ErrRel}(f(\hat{x}))}{\text{ErrRel}(\hat{x})} = \frac{\frac{||f(\hat{x})-f(x)||}{||f(x)||}}{\frac{||x-\hat{x}||}{||x||}}$$

assuming $x, f(x) \neq 0$.
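As a quick numerical illustration of this definition, we can perturb $x$ slightly and compare the relative change in $f(x)$ with the relative change in $x$. Here $f(x)=\sqrt{x}$ at $x=2$ is an arbitrary choice made only for the illustration.

```
import math

f = math.sqrt
x = 2.0
delta_x = 1e-6 * x          # a small relative perturbation of x
x_hat = x + delta_x

rel_err_output = abs(f(x_hat) - f(x)) / abs(f(x))
rel_err_input = abs(x_hat - x) / abs(x)
print(rel_err_output / rel_err_input)   # approximately 0.5: a well conditioned problem
```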
**Obs:** si $f$ es una función diferenciable, podemos evaluar $\text{Cond}_f^R$ con la derivada de $f$, pues a primer orden (usando teorema de Taylor): $f(\hat{x})-f(x) \approx \mathcal{J}_f(x)\Delta x$ con igualdad para $\Delta x \rightarrow 0$ y $\mathcal{J}_f$ la Jacobiana de $f$ definida como una matriz con entradas: $(\mathcal{J}_f(x))_{ij} = \frac{\partial f_i(x)}{\partial x_j}$. Por tanto, se tiene: $$\text{Cond}_{f}^R = \frac{||\mathcal{J}_f(x)||||x||}{||f(x)||}$$ y $||\mathcal{J}_f(x)||$ es una norma matricial inducida por las normas en $\mathbb{X}, \mathbb{Y}$. Ver final de esta nota para definición de norma y propiedades. **Comentario:** en la práctica se considera a un problema **bien condicionado** si $\text{Cond}_f^R$ es "pequeño": menor a $10$, **medianamente condicionado** si es de orden entre $10^1$ y $10^2$ y **mal condicionado** si es "grande": mayor a $10^3$. **Ejercicio:** Calcular $\text{Cond}_f^R$ de los siguientes problemas. Para $x \in \mathbb{R}$ usa el valor absoluto y para $x \in \mathbb{R}^n$ usa $||x||_\infty$. 1. $x \in \mathbb{R} - \{0\}$. Problema: realizar la operación $\frac{x}{2}$. 2. $x \geq 0$. Problema: calcular $\sqrt{x}$. 3. $x \approx \frac{\pi}{2}$. Problema: calcular $\cos(x)$. 4. $x \in \mathbb{R}^2$. Problema: calcular $x_1-x_2$. **Comentario:** las dificultades que pueden surgir al resolver un problema **no** siempre están relacionadas con una fórmula o un algoritmo mal diseñado sino con el problema en cuestión. En el ejercicio anterior, observamos que áun utilizando **aritmética exacta**, la solución del problema puede ser altamente sensible a perturbaciones a los datos de entrada. Por esto el número de condición relativo se define de acuerdo a perturbaciones en los datos de entrada y mide la perturbación en los datos de salida que uno espera: $$\text{Cond}_f^R = \frac{||\text{Cambios relativos en la solución}||}{||\text{Cambios relativos en los datos de entrada}||}.$$ ## Estabilidad de un algoritmo Pensemos a un algoritmo $\hat{f}$ como una función $\hat{f}:\mathbb{X}\rightarrow \mathbb{Y}$ para resolver el problema $f$ con datos $x \in \mathbb{X}$, donde $\mathbb{X}$ es un espacio vectorial con norma definida y $\mathbb{Y}$ es otro espacio vectorial con una norma definida. La implantación del algoritmo $\hat{f}$ en una máquina conduce a considerar: * Errores por redondeo: $$fl(u) = u(1+\epsilon), |\epsilon| \leq \epsilon_{maq}, \forall u \in \mathbb{R}.$$ * Operaciones en un SPFN, $\mathcal{Fl}$. Por ejemplo para la suma: $$u \oplus v = fl(u+v) = (u + v)(1+\epsilon), |\epsilon|\leq \epsilon_{maq} \forall u,v \in \mathcal{Fl}.$$ Esto es, $\hat{f}$ depende de $x \in \mathbb{X}$ y $\epsilon_{maq}$: representación de los números reales en una máquina y operaciones entre ellos o aritmética de máquina. Ver nota: [1.2.Sistema_de_punto_flotante](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.2.Sistema_de_punto_flotante.ipynb). Al ejecutar $\hat{f}$ obtenemos una colección de números en el SPFN que pertenecen a $\mathbb{Y}$: $\hat{f}(x)$. Debido a las diferencias entre un problema con cantidades continuas y una máquina que trabaja con cantidades discretas, los algoritmos numéricos **no** son exactos para **cualquier** elección de datos $x \in \mathbb{X}$. Esto es, los algoritmos **no** cumplen que la cantidad: $$\frac{||\hat{f}(x)-f(x)||}{||f(x)||}$$ dependa únicamente de errores por redondeo al evaluar $f$ $\forall x \in \mathbb{X}$. 
En notación matemática: $$\frac{||\hat{f}(x)-f(x)||}{||f(x)||} \leq K \epsilon_{maq} \forall x \in \mathbb{X}$$ con $K > 0$ no se cumple en general. La razón de lo anterior tiene que ver con cuestiones en la implantación de $\hat{f}$ como el número de iteraciones, la representación de $x$ en un SPFN o el mal condicionamiento de $f$. Así, a los algoritmos en el análisis numérico, se les pide una condición menos estricta que la anterior y más bien satisfagan lo que se conoce como **estabilidad**. Se dice que un algoritmo $\hat{f}$ para un problema $f$ es **estable** si: $$\forall x \in \mathbb{X}, \frac{||\hat{f}(x)-f(\hat{x})||}{||f(\hat{x})||} \leq K_1\epsilon_{maq}, K_1>0$$ para $\hat{x} \in \mathbb{X}$ tal que $\frac{||x-\hat{x}||}{||x||} \leq K_2\epsilon_{maq}, K_2>0$. Esto es, $\hat{f}$ resuelve un problema cercano para datos cercanos (cercano en el sentido del $\epsilon_{maq}$) independientemente de la elección de $x$. **Obs:** obsérvese que esta condición es más flexible y en general $K_1, K_2$ dependen de las dimensiones de $\mathbb{X},\mathbb{Y}$. **Comentarios:** * Esta definición resulta apropiada para la mayoría de los problemas en el ánalisis numérico. Para otras áreas, por ejemplo en ecuaciones diferenciales, donde se tienen definiciones de sistemas dinámicos estables e inestables (cuyas definiciones no se deben confundir con las descritas para algoritmos), esta condición es muy estricta. * Tenemos algoritmos que satisfacen una condición más estricta y simple que la estabilidad: **estabilidad hacia atrás**: ### Estabilidad hacia atrás Decimos que un algoritmo $\hat{f}$ para el problema $f$ es **estable hacia atrás** si: $$\forall x \in \mathbb{X}, \hat{f}(x) = f(\hat{x})$$ con $\hat{x} \in \mathbb{X}$ tal que $\frac{||x-\hat{x}||}{||x||} \leq K\epsilon_{maq}, K>0$. Esto es, el algoritmo $\hat{f}$ da la solución **exacta** para datos cercanos (cercano en el sentido de $\epsilon_{maq}$), independientemente de la elección de $x$. **Comentario:** Para entender la estabilidad hacia atrás de un algoritmo, considérese el ejemplo siguiente. **Problema:** evaluar $f(x) = e^x$ en $x=1$. **Resultado:** $f(1) = e^1 = 2.718281...$. ``` import math x=1 math.exp(x) ``` **Algoritmo:** truncar la serie $1 + x + \frac{x^2}{2} + \frac{x^3}{6} + \dots $ a cuatro términos: $\hat{f}(x) = 1 + x + \frac{x^2}{2} + \frac{x^3}{6}$. **Resultado del algoritmo:** $\hat{f}(1) = 2.\bar{6}$ ``` algoritmo = lambda x: 1 + x + x**2/2.0 + x**3/6.0 algoritmo(1) ``` **Pregunta:** ¿Qué valor $\hat{x} \in \mathbb{R}$ hace que el valor calculado por el algoritmo $\hat{f}(1)$ sea igual a $f(\hat{x})$? -> **Solución:** Resolver la ecuación: $e^{\hat{x}} = 2.\bar{6}$, esto es: $\hat{x} = log(2.\bar{6}) = 0.980829...$. Entonces $f(\hat{x}) = 2.\bar{6} = \hat{f}(x)$. ``` x_hat = math.log(algoritmo(1)) x_hat ``` Entonces, el algoritmo es estable hacia atrás sólo si la diferencia entre $x$ y $\hat{x}$ en términos relativos es menor a $K \epsilon_{maq}$ con $K >0$. Además, podemos calcular **errores hacia delante** y **errores hacia atrás**: error hacia delante: $\hat{f}(x) - f(x) = -0.05161...$, error hacia atrás: $\hat{x}-x = -0.01917...$. 
``` err_delante = algoritmo(x) - math.exp(x) err_delante err_atras = x_hat-x err_atras ``` Dependiendo del problema estos errores son pequeños o grandes, por ejemplo si consideramos tener una cifra correcta como suficiente para determinar que es una buena aproximación entonces podemos concluir: $\hat{f}$ obtiene una respuesta correcta y cercana al valor de $f$ (error hacia delante) y la respuesta que obtuvimos con $\hat{f}$ es correcta para datos ligeramente perturbados (error hacia atrás). **Obs:** * Obsérvese que el error hacia delante requiere resolver el problema $f$ (para calcular $f(x)$) y de información sobre $f$. * En el ejemplo anterior se calculó $\hat{f}(x)$ y se calculó qué tan larga debe ser la modificación en los datos $x$, esto es: $\hat{x}$, para que $\hat{f}(x) = f(\hat{x})$ (error hacia atrás). * Dibujo que ayuda a ver errores hacia atrás y hacia delante: <img src="https://dl.dropboxusercontent.com/s/b30awajxvl3u8qe/error_hacia_delante_hacia_atras.png?dl=0" heigth="500" width="500"> En resumen, algunas características de un método **estable** numéricamente respecto al redondeo son: * Variaciones "pequeñas" en los datos de entrada del método generan variaciones "pequeñas" en la solución del problema. * No amplifican errores de redondeo en los cálculos involucrados. * Resuelven problemas "cercanos" para datos ligeramente modificados. # 1.3.1 Número de condición de una matriz En el curso trabajaremos con algoritmos matriciales que son numéricamente estables (o estables hacia atrás) ante errores por redondeo, sin embargo la exactitud que obtengamos con tales algoritmos dependerán de qué tan bien (o mal) condicionado esté el problema. En el caso de matrices la condición de un problema puede ser cuantificada con el **número de condición** de la matriz del problema. Aunque haciendo uso de definiciones como la pseudoinversa de una matriz es posible definir el número de condición para una matriz en general rectangular $A \in \mathbb{R}^{m\times n}$, en esta primera definición consideramos matrices cuadradas no singulares $A \in \mathbb{R}^{n\times n}$: $$\text{cond}(A) = ||A|| ||A^{-1}||.$$ **Obs:** obsérvese que la norma anterior es una **norma matricial** y cond$(\cdot)$ puede calcularse para diferentes normas matriciales. Ver final de esta nota para definición de norma y propiedades. ## ¿Por qué se utiliza la expresión $||A|| ||A^{-1}||$ para definir el número de condición de una matriz? Esta pregunta tiene que ver con el hecho que tal expresión aparece frecuentemente en problemas típicos de matrices. Para lo anterior considérese los siguientes problemas $f$: 1.Sean $A \in \mathbb{R}^{n\times n}$ no singular, $x \in \mathbb{R}^n$ y $f$ el problema de realizar la multiplicación $Ax$ para $x$ fijo, esto es: $f: \mathbb{R}^n \rightarrow \mathbb{R}^n$ dada por $f(x) = Ax$. 
Considérese una perturbación en $x: \hat{x} = x + \Delta x$, entonces: $$\text{Cond}_f^R = \frac{\text{ErrRel}(f(\hat{x}))}{\text{ErrRel}(\hat{x})} = \frac{\frac{||f(\hat{x})-f(x)||}{||f(x)||}}{\frac{||x-\hat{x}||}{||x||}} \approx \frac{||\mathcal{J}_f(x)||||x||}{||f(x)||}.$$ Para este problema tenemos: $$\frac{||\mathcal{J}_f(x)||||x||}{||f(x)||} = \frac{||A|| ||x||}{||Ax||}.$$ Si las normas matriciales utilizadas en el número de condición son consistentes (ver final de esta nota para definición de norma y propiedades) entonces: $$||x|| = ||A^{-1}Ax|| \leq ||A^{-1}||||Ax|| \therefore \frac{||x||}{||Ax||} \leq ||A^{-1}||$$ y se tiene: $$\text{Cond}_f^R \leq ||A|| ||A^{-1}||.$$ 2.Sean $f: \mathbb{R}^n \rightarrow \mathbb{R}, A \in \mathbb{R}^{n\times n}$ no singular. Considérese el problema de calcular $f(b) = A^{-1}b$ para $b \in \mathbb{R}^n$ fijo y la perturbación $\hat{b} = b + \Delta b$ entonces bajo las suposiciones del ejemplo anterior: $$\text{Cond}_f^R \approx \frac{||A^{-1}|| ||b||}{||A^{-1}b||}.$$ Si las normas matriciales utilizadas en el número de condición son consistentes (ver final de esta nota para definición de norma y propiedades) entonces: $$||b|| = ||AA^{-1}b|| \leq ||A|| ||A^{-1}b|| \therefore \text{Cond}_f^R \leq ||A^{-1}|| ||A||.$$ 3.Sean $f: \mathbb{R}^{n\times n} \rightarrow \mathbb{R}^n, A \in \mathbb{R}^{n\times n}$ no singular $b \in \mathbb{R}^n$ fijo. Considérese el problema de calcular la solución $x$ del sistema $Az=b$, esto es, calcular: $x = f(A) = A^{-1}b.$ Además, considérese la perturbación $\hat{A} = A + \Delta A$ en tal sistema $Az = b$. Se tiene: $$\hat{x} = \hat{A}^{-1}b,$$ donde: $\hat{x} = x + \Delta x$ (si se perturba $A$ entonces se perturba también $x$). De la ecuación anterior como $\hat{x} = \hat{A}^{-1}b$ se tiene: $$\hat{A}\hat{x} = b$$ $$(A+\Delta A)(x+\Delta x) = b$$ $$Ax + A \Delta x + \Delta Ax + \Delta A \Delta x = b$$ $$b + A \Delta x + \Delta A x = b$$ Donde en esta última ecuación se supuso que $\Delta A \Delta x \approx 0$ y de aquí: $$A \Delta x + \Delta A x \approx 0 \therefore \Delta x \approx - A^{-1} \Delta A x.$$ Entonces se tiene que la condición del problema $f$ calcular la solución de sistema de ecuaciones lineales $Az=b$ con $A$ no singular ante perturbaciones en $A$ es: $$\text{Cond}_f^R = \frac{\frac{||x-\hat{x}||}{||x||}}{\frac{||A-\hat{A}||}{||A||}}=\frac{\frac{||\Delta x||}{||x||}}{\frac{||\Delta A||}{||A||}} \leq \frac{\frac{||A^{-1}||||\Delta Ax||}{||x||}}{\frac{||\Delta A||}{||A||}} \leq ||A^{-1}||||A||.$$ ## ¿Qué está midiendo el número de condición de una matriz respecto a un sistema de ecuaciones lineales? El número de condición de una matriz mide la **sensibilidad** de la solución de un sistema de ecuaciones lineales ante perturbaciones en los datos de entrada (en la matriz del sistema $A$ o en el lado derecho $b$). Si pequeños cambios en los datos de entrada generan grandes cambios en la solución tenemos un **sistema mal condicionado**. Si pequeños cambios en los datos de entrada generan pequeños cambios en la solución tenemos un sistema **bien condicionado**. 
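Antes de pasar a los ejemplos, un bosquejo mínimo que verifica numéricamente la definición $\text{cond}(A)=||A||\,||A^{-1}||$ usando la norma $2$ y la compara con `numpy.linalg.cond`; las matrices son las mismas que aparecen en los ejemplos de abajo:

```
import numpy as np

A_ejemplo_1 = np.array([[1, 2], [1.1, 2]])          #matriz del primer ejemplo de abajo
A_ejemplo_2 = np.array([[.03, 58.9], [5.31, -6.1]]) #matriz del segundo ejemplo de abajo

for A in (A_ejemplo_1, A_ejemplo_2):
    cond_def = np.linalg.norm(A, 2)*np.linalg.norm(np.linalg.inv(A), 2)
    print(cond_def, np.linalg.cond(A, 2)) #ambas cantidades coinciden
```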
Lo anterior puede apreciarse con los siguientes ejemplos y gráficas: ``` import numpy as np import matplotlib.pyplot as plt import scipy import pprint ``` 1.Resolver los siguientes sistemas: $$a) \begin{array}{ccc} x_1 +2x_2 &= & 10 \\ 1.1x_1 + 2x_2 &= & 10.4 \end{array} $$ $$b)\begin{array}{ccc} 1.05x_1 +2x_2 &= & 10 \\ 1.1x_1 + 2x_2 &= & 10.4\end{array} $$ ``` print('inciso a') A = np.array([[1, 2], [1.1, 2]]) b = np.array([10,10.4]) print('matriz A:') pprint.pprint(A) print('lado derecho b:') pprint.pprint(b) x=np.linalg.solve(A,b) print('solución x:') pprint.pprint(x) x=np.arange(0,10,.5) recta1 = lambda x: 1/2.0*(10-1*x) recta2 = lambda x: 1/2.0*(10.4-1.1*x) plt.plot(x,recta1(x),'o-',x,recta2(x),'^-') plt.title('Sistema mal condicionado') plt.legend(('x1+2x2=10','1.1x1+2x2=10.4')) plt.grid(True) plt.show() ``` **Obs:** obsérvese que las dos rectas anteriores tienen una inclinación (pendiente) similar por lo que no se ve claramente el punto en el que intersectan. ``` print('inciso b') A = np.array([[1.05, 2], [1.1, 2]]) b = np.array([10,10.4]) print('matriz A ligeramente modificada:') pprint.pprint(A) print('lado derecho b:') pprint.pprint(b) x=np.linalg.solve(A,b) print('solución x:') pprint.pprint(x) x=np.arange(0,10,.5) recta1 = lambda x: 1/2.0*(10-1.05*x) recta2 = lambda x: 1/2.0*(10.4-1.1*x) plt.plot(x,recta1(x),'o-',x,recta2(x),'^-') plt.title('Sistema mal condicionado') plt.legend(('1.05x1+2x2=10','1.1x1+2x2=10.4')) plt.grid(True) plt.show() ``` **Obs:** al modificar un poco las entradas de la matriz $A$ la solución del sistema cambia drásticamente. **Comentario:** otra forma de describir a un sistema mal condicionado es que un amplio rango de valores en un SPFN satisfacen tal sistema de forma aproximada. 2.Resolver los siguientes sistemas: $$a) \begin{array}{ccc} .03x_1 + 58.9x_2 &= & 59.2 \\ 5.31x_1 -6.1x_2 &= & 47 \end{array} $$ $$a) \begin{array}{ccc} .03x_1 + 58.9x_2 &= & 59.2 \\ 5.31x_1 -6.05x_2 &= & 47 \end{array} $$ ``` print('inciso a') A = np.array([[.03, 58.9], [5.31, -6.1]]) b = np.array([59.2,47]) print('matriz A:') pprint.pprint(A) print('lado derecho b:') pprint.pprint(b) x=np.linalg.solve(A,b) print('solución x:') pprint.pprint(x) x=np.arange(4,14,.5) recta1 = lambda x: 1/58.9*(59.2-.03*x) recta2 = lambda x: 1/6.1*(5.31*x-47) plt.plot(x,recta1(x),'o-',x,recta2(x),'^-') plt.title('Sistema bien condicionado') plt.legend(('.03x1+58.9x2=59.2','5.31x1-6.1x2=47')) plt.grid(True) plt.show() ``` **Obs:** obsérvese que la solución del sistema de ecuaciones (intersección entre las dos rectas) está claramente definido. ``` print('inciso b') A = np.array([[.03, 58.9], [5.31, -6.05]]) b = np.array([59.2,47]) print('matriz A ligeramente modificada:') pprint.pprint(A) print('lado derecho b:') pprint.pprint(b) x=np.linalg.solve(A,b) print('solución x:') pprint.pprint(x) x=np.arange(4,14,.5) recta1 = lambda x: 1/58.9*(59.2-.03*x) recta2 = lambda x: 1/6.05*(5.31*x-47) plt.plot(x,recta1(x),'o-',x,recta2(x),'^-') plt.title('Sistema bien condicionado') plt.legend(('.03x1+58.9x2=59.2','5.31x1-6.05x2=47')) plt.grid(True) plt.show() ``` **Obs:** al modificar un poco las entradas de la matriz $A$ la solución **no** cambia mucho. **Comentarios:** 1.¿Por qué nos interesa considerar perturbaciones en los datos de entrada? -> recuérdese que los números reales se representan en la máquina mediante el sistema de punto flotante (SPF), entonces al ingresar datos a la máquina tenemos perturbaciones y por tanto errores de redondeo. 
Ver nota: [1.2.Sistema_de_punto_flotante](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.2.Sistema_de_punto_flotante.ipynb) 2.Las matrices anteriores tienen número de condición distinto: ``` print('matriz del ejemplo 1') A = np.array([[1, 2], [1.1, 2]]) pprint.pprint(A) ``` su número de condición es: ``` np.linalg.cond(A) print('matriz del ejemplo 2') A = np.array([[.03, 58.9], [5.31, -6.1]]) pprint.pprint(A) ``` su número de condición es: ``` np.linalg.cond(A) ``` Las matrices del ejemplo $1$ y $2$ son **medianamente** condicionadas. Una matriz se dice **bien condicionada** si cond$(A)$ es cercano a $1$. ## Algunas propiedades del número de condición de una matriz * Si $A \in \mathbb{R}^{n\times n}$ es no singular entonces: $$\frac{1}{\text{cond}(A)} = \min \left\{ \frac{||A-B||}{||A||} \mathrel{}\middle|\mathrel{} B \text{ es singular}, ||\cdot|| \text{ es una norma inducida} \right\}.$$ esto es, una matriz mal condicionada (número de condición grande) se le puede aproximar muy bien por una matriz singular. Sin embargo, el mal condicionamiento no necesariamente se relaciona con singularidad. Una matriz singular es mal condicionada pero una matriz mal condicionada no necesariamente es singular. Considérese por ejemplo la matriz de **Hilbert**: ``` from scipy.linalg import hilbert hilbert(4) np.linalg.cond(hilbert(4)) ``` la cual es una matriz mal condicionada pero es no singular: ``` np.linalg.inv(hilbert(4))@hilbert(4) ``` y otro ejemplo de una matriz singular: ``` print('matriz singular') A = np.array([[1, 2], [1, 2]]) pprint.pprint(A) np.linalg.inv(A) np.linalg.cond(A) ``` * Para las normas matriciales inducidas se tiene: * cond$(A)\geq 1, \forall A \in \mathbb{R}^{n\times n}$. * cond$(\gamma A) = \text{cond}(A), \forall \gamma \in \mathbb{R}-\{0\}, \forall A \in \mathbb{R}^{n\times n}$. * cond$_2(A) = ||A||_2||A^{-1}||_2 = \frac{\sigma_{\max}}{\sigma_{\min}}, \sigma_{\min} \neq 0$. * En el problema: resolver $Ax = b$ se cumple: $$\text{ErrRel}(\hat{x}) = \frac{||x^*-\hat{x}||}{||x^*||} \leq \text{cond}(A) \left ( \frac{||\Delta A||}{||A||} + \frac{||\Delta b||}{||b||} \right ), b \neq 0.$$ donde: $x^*$ es solución de $Ax=b$ y $\hat{x}$ es solución aproximada que se obtiene por algún método numérico (por ejemplo factorización LU). $\frac{||\Delta A||}{||A||}, \frac{||\Delta b||}{||b||}$ son los errores relativos en las entradas de $A$ y $b$ respectivamente. **Comentario:** la desigualdad anterior se puede interpretar como sigue: si sólo tenemos perturbaciones en $A$ de modo que se tienen errores por redondeo del orden de $10^{-k}$ y por lo tanto $k$ dígitos de precisión en $A$ y cond$(A)$ es del orden de $10^c$ entonces $\text{ErrRel}(\hat{x})$ puede llegar a tener errores de redondeo de a lo más del orden de $10^{c-k}$ y por tanto $k-c$ dígitos de precisión: $$\text{ErrRel}(\hat{x}) \leq \text{cond}(A) \frac{||\Delta A||}{||A||}.$$ * Supongamos que $x^*$ es solución del sistema $Ax=b$ y obtenemos $\hat{x}$ por algún método numérico (por ejemplo factorización LU) entonces ¿qué condiciones garantizan que $||x^*-\hat{x}||$ sea cercano a cero (del orden de $ \epsilon_{maq}= 10^{-16}$), ¿de qué depende esto? Para responder las preguntas anteriores definimos el residual de $Ax=b$ como $$r=A\hat{x}-b$$ con $\hat{x}$ aproximación a $x^*$ obtenida por algún método numérico. 
Asimismo, el residual relativo a la norma de $b$ como: $$\frac{||r||}{||b||}.$$ **Obs:** típicamente $x^*$ (solución exacta) es desconocida y por ello no podríamos calcular $||x^*-\hat{x}||$, sin embargo sí podemos calcular el residual relativo a la norma de $b$: $\frac{||r||}{||b||}$. ¿Se cumple que $\frac{||r||}{||b||}$ pequeño implica $\text{ErrRel}(\hat{x})$ pequeño? El siguiente resultado nos ayuda a responder esta y las preguntas anteriores: Sea $A \in \mathbb{R}^{n\times n}$ no singular, $x^*$ solución de $Ax=b$, $\hat{x}$ aproximación a $x^*$, entonces para las normas matriciales inducidas se cumple: $$\frac{||r||}{||b||} \frac{1}{\text{cond}(A)} \leq \frac{||x^*-\hat{x}||}{||x^*||}\leq \text{cond}(A)\frac{||r||}{||b||}.$$ Por la desigualdad anterior, si $\text{cond}(A) \approx 1$ entonces $\frac{||r||}{||b||}$ es una buena estimación de $\text{ErrRel}(\hat{x}) = \frac{||x^*-\hat{x}||}{||x^*||}$ por lo que $\hat{x}$ es una buena estimación de $x^*$. Si $\text{cond}(A)$ es grande no podemos decir **nada** acerca de $\text{ErrRel}(\hat{x})$ ni de $\hat{x}$. **Ejemplos:** 1. $$a) \begin{array}{ccc} x_1 + x_2 &= & 2 \\ 10.05x_1 + 10x_2 &= & 21 \end{array} $$ $$b) \begin{array}{ccc} x_1 + x_2 &= & 2 \\ 10.1x_1 + 10x_2 &= & 21 \end{array} $$ ``` print('inciso a') A_1 = np.array([[1, 1], [10.05, 10]]) b_1 = np.array([2,21]) print('matriz A_1:') pprint.pprint(A_1) print('lado derecho b_1:') pprint.pprint(b_1) x_est=np.linalg.solve(A_1,b_1) print('solución x_est:') pprint.pprint(x_est) print('inciso b') A_2 = np.array([[1, 1], [10.1, 10]]) b_2 = np.array([2,21]) print('matriz A_2:') pprint.pprint(A_2) print('lado derecho b_2:') pprint.pprint(b_2) x_hat=np.linalg.solve(A_2,b_2) print('solución x_hat:') pprint.pprint(x_hat) print('residual relativo:') r_rel = np.linalg.norm(A_1@x_hat-b_1)/np.linalg.norm(b_1) r_rel print('error relativo:') err_rel = np.linalg.norm(x_hat-x_est)/np.linalg.norm(x_est) pprint.pprint(err_rel) ``` **no tenemos una buena estimación del error relativo a partir del residual relativo pues:** ``` np.linalg.cond(A_1) ``` De acuerdo a la cota del resultado el error relativo se encuentra en el intervalo: ``` (r_rel*1/np.linalg.cond(A_1), r_rel*np.linalg.cond(A_1)) ``` 2. $$a) \begin{array}{ccc} 4.1x_1 + 2.8x_2 &= & 4.1 \\ 9.7x_1 + 6.6x_2 &= & 9.7 \end{array}$$ $$b) \begin{array}{ccc} 4.1x_1 + 2.8x_2 &= & 4.11 \\ 9.7x_1 + 6.6x_2 &= & 9.7 \end{array}$$ ``` print('inciso a') A_1 = np.array([[4.1, 2.8], [9.7, 6.6]]) b_1 = np.array([4.1,9.7]) print('matriz A_1:') pprint.pprint(A_1) print('lado derecho b_1:') pprint.pprint(b_1) x_est=np.linalg.solve(A_1,b_1) print('solución x_est:') pprint.pprint(x_est) print('inciso b') A_2 = np.array([[4.1, 2.8], [9.7, 6.6]]) b_2 = np.array([4.11,9.7]) print('matriz A_2:') pprint.pprint(A_2) print('lado derecho b_2:') pprint.pprint(b_2) x_hat=np.linalg.solve(A_2,b_2) print('solución x_hat:') pprint.pprint(x_hat) print('residual relativo:') r_rel = np.linalg.norm(A_1@x_hat-b_1)/np.linalg.norm(b_1) r_rel print('error relativo:') err_rel = np.linalg.norm(x_hat-x_est)/np.linalg.norm(x_est) pprint.pprint(err_rel) ``` **no tenemos una buena estimación del error relativo a partir del residual relativo pues:** ``` np.linalg.cond(A_1) (r_rel*1/np.linalg.cond(A_1), r_rel*np.linalg.cond(A_1)) ``` 3. 
$$a) \begin{array}{ccc} 3.9x_1 + 11.6x_2 &= & 5.5 \\ 12.8x_1 + 2.9x_2 &= & 9.7 \end{array}$$ $$b) \begin{array}{ccc} 3.95x_1 + 11.6x_2 &= & 5.5 \\ 12.8x_1 + 2.9x_2 &= & 9.7 \end{array}$$ ``` print('inciso a') A_1 = np.array([[3.9, 11.6], [12.8, 2.9]]) b_1 = np.array([5.5,9.7]) print('matriz A_1:') pprint.pprint(A_1) print('lado derecho b_1:') pprint.pprint(b_1) x_est=np.linalg.solve(A_1,b_1) print('solución x_est:') pprint.pprint(x_est) print('inciso b') A_2 = np.array([[3.95, 11.6], [12.8, 2.9]]) b_2 = np.array([5.5,9.7]) print('matriz A_2:') pprint.pprint(A_2) print('lado derecho b_2:') pprint.pprint(b_2) x_hat=np.linalg.solve(A_2,b_2) print('solución x_hat:') pprint.pprint(x_hat) print('residual relativo:') r_rel = np.linalg.norm(A_1@x_hat-b_1)/np.linalg.norm(b_1) r_rel print('error relativo:') err_rel = np.linalg.norm(x_hat-x_est)/np.linalg.norm(x_est) pprint.pprint(err_rel) ``` **sí tenemos una buena estimación del error relativo a partir del residual relativo pues:** ``` np.linalg.cond(A_1) (r_rel*1/np.linalg.cond(A_1), r_rel*np.linalg.cond(A_1)) ``` 3. $\theta=\frac{\pi}{3}$ ``` theta_1=math.pi/3 (math.cos(theta_1),math.sin(theta_1)) theta_2 = math.pi/3 + .00005 theta_2 (math.cos(theta_2),math.sin(theta_2)) ``` $$a) \begin{array}{ccc} \cos(\theta_1)x_1 - \sin(\theta_1)x_2 &= & -1.5 \\ \sin(\theta_1)x_1 + \cos(\theta_1)x_2 &= & 2.4 \end{array}$$ $$b) \begin{array}{ccc} \cos(\theta_2)x_1 - \sin(\theta_2)x_2 &= & -1.5 \\ \sin(\theta_2)x_1 + \cos(\theta_2)x_2 &= & 2.4 \end{array}$$ $$c) \begin{array}{ccc} \cos(\theta_2)x_1 - \sin(\theta_2)x_2 &= & -1.7 \\ \sin(\theta_2)x_1 + \cos(\theta_2)x_2 &= & 2.4 \end{array}$$ ``` print('inciso a') A_1 = np.array([[math.cos(theta_1), -math.sin(theta_1)], [math.sin(theta_1), math.cos(theta_1)]]) b_1 = np.array([-1.5,2.4]) print('matriz A_1:') pprint.pprint(A_1) print('lado derecho b_1:') pprint.pprint(b_1) x_est=np.linalg.solve(A_1,b_1) print('solución x_est:') pprint.pprint(x_est) print('inciso b') A_2 = np.array([[math.cos(theta_2), -math.sin(theta_2)], [math.sin(theta_2), math.cos(theta_2)]]) b_2 = np.array([-1.5,2.4]) print('matriz A_2:') pprint.pprint(A_2) print('lado derecho b_2:') pprint.pprint(b_2) x_hat=np.linalg.solve(A_2,b_2) print('solución x_hat:') pprint.pprint(x_hat) print('residual relativo:') r_rel = np.linalg.norm(A_1@x_hat-b_1)/np.linalg.norm(b_1) '{:0.10e}'.format(r_rel) print('error relativo:') err_rel = np.linalg.norm(x_hat-x_est)/np.linalg.norm(x_est) '{:0.10e}'.format(err_rel) ``` **sí tenemos una buena estimación del error relativo a partir del residual relativo pues:** ``` np.linalg.cond(A_1) ('{:0.10e}'.format(r_rel*1/np.linalg.cond(A_1)), '{:0.10e}'.format(r_rel*np.linalg.cond(A_1))) print('inciso c') A_2 = np.array([[math.cos(theta_2), -math.sin(theta_2)], [math.sin(theta_2), math.cos(theta_2)]]) b_2 = np.array([-1.7,2.4]) print('matriz A_2:') pprint.pprint(A_2) print('lado derecho b_2:') pprint.pprint(b_2) x_hat=np.linalg.solve(A_2,b_2) print('solución x_hat:') pprint.pprint(x_hat) print('residual relativo:') r_rel = np.linalg.norm(A_1@x_hat-b_1)/np.linalg.norm(b_1) '{:0.14e}'.format(r_rel) print('error relativo:') err_rel = np.linalg.norm(x_hat-x_est)/np.linalg.norm(x_est) '{:0.14e}'.format(err_rel) ``` **sí tenemos una buena estimación del error relativo a partir del residual relativo pues:** ``` np.linalg.cond(A_1) ('{:0.14e}'.format(r_rel*1/np.linalg.cond(A_1)), '{:0.14e}'.format(r_rel*np.linalg.cond(A_1))) ``` Así, $\text{cond}(A)$ nos da una calidad (mediante $\frac{||r||}{||b||}$) de la solución $\hat{x}$ en el 
problema inicial (resolver $Ax=b$) obtenida por algún método numérico respecto a la solución $x^*$ de $Ax=b$. **Obs:** Por último obsérvese que la condición del problema inicial (resolver $Ax=b$) **no depende del método númerico** que se elige para resolverlo. **Ejercicio:** proponer sistemas de ecuaciones lineales con distinto número de condición, perturbar matriz del sistema o lado derecho (o ambos) y revisar números de condición y residuales relativos de acuerdo a la cota: $$\frac{||r||}{||b||} \frac{1}{\text{cond}(A)} \leq \frac{||x^*-\hat{x}||}{||x^*||}\leq \text{cond}(A)\frac{||r||}{||b||}.$$ Verificar que si el número de condición del sistema es pequeño entonces el residual relativo estima bien al error relativo. ## Número de condición de una matriz $A \in \mathbb{R}^{m\times n}$ Para este caso se utiliza la **pseudoinversa** de $A$ definida a partir de la descomposición en valores singulares compacta (compact SVD, ver [3.2.2.Factorizaciones_matriciales_SVD_Cholesky_QR](https://www.dropbox.com/s/s4ch0ww1687pl76/3.2.2.Factorizaciones_matriciales_SVD_Cholesky_QR.pdf?dl=0)) y denotada como $A^{\dagger}$: $$A^{\dagger} = V \Sigma^{\dagger} U^T$$ donde: $\Sigma ^{\dagger}$ es la matriz transpuesta de $\Sigma$ y tiene entradas $\sigma_i^{+}:$ $$\sigma_i^+ = \begin{cases} \frac{1}{\sigma_i} &\text{ si } \sigma_i \neq 0,\\ 0 &\text{ en otro caso} \end{cases} $$ $\forall i=1,\dots, r$ con $r=rank(A)$. **Comentarios y propiedades:** * $A^{\dagger}$ se le conoce como pseudoinversa de $Moore-Penrose$. * Si $rank(A)=n$ entonces $A^{\dagger} = (A^TA)^{-1}A^T$, si $rank(A)=m$, $A^\dagger = A^T(AA^T)^{-1}$, si $A\in \mathbb{R}^{n\times n}$ no singular, entonces $A^\dagger=A^{-1}$. * Con $A^\dagger$ se define $\text{cond}(A)$ para $A \in \mathbb{R}^{m\times n}$: $$\text{cond}(A) = ||A||||A^\dagger||$$ de hecho, se tiene: $$\text{cond}_2(A) = \frac{\sigma_{max}}{\sigma_{min}}=\frac{\sigma_1}{\sigma_r}.$$ --- ## Norma Una norma define una medida de distancia en un conjunto y da nociones de tamaño, vecindad, convergencia y continuidad. ### Normas vectoriales Sea $\mathbb{R}^n$ el conjunto de $n$-tuplas o vectores columna o $1$-arreglo de orden $1$, esto es: $$x \in \mathbb{R}^n \iff x = \left[\begin{array}{c} x_1\\ x_2\\ \vdots\\ x_n \end{array} \right] \text{ con } x_i \in \mathbb{R}$$ Una norma vectorial en $\mathbb{R}^n$ es una función $g: \mathbb{R}^n \rightarrow \mathbb{R}$ que satisface las siguientes propiedades: * $g$ es no negativa: $g(x) \geq 0 \forall x \in \mathbb{R}^n$. * $g$ es definida: $g(x) = 0 \iff x = 0$. * $g$ satisface la desigualdad del triángulo: $$g(x+y) \leq g(x) + g(y) \forall x,y \in \mathbb{R}^n.$$ * $g$ es homogénea: $g(\alpha x)=|\alpha|g(x), \forall \alpha \in \mathbb{R}, \forall x \in \mathbb{R}^n$. Notación: $g(x) = ||x||$. **Comentarios y propiedades:** * Una norma es una generalización del valor absoluto de $\mathbb{R}$: $|x|, x \in \mathbb{R}.$ * Un espacio vectorial con una norma definida en éste se le llama **espacio vectorial normado**. * Una norma es una medida de la longitud de un vector. * Con una norma es posible definir conceptos como distancia entre vectores: $x,y \in \mathbb{R}^n: \text{dist}(x,y) = ||x-y||$. * Existen varias normas en $\mathbb{R}^n$ siendo las más comunes: * La norma $\mathcal{l}_2$, Euclidiana o norma $2$: $||x||_2$. * La norma $\mathcal{l}_1$ o norma $1$: $||x||_1$. * La norma $\infty$ o de Chebyshev o norma infinito: $||x||_\infty$. 
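Como verificación rápida, un bosquejo que calcula estas tres normas para un vector concreto con `numpy.linalg.norm` (el vector es sólo ilustrativo):

```
import numpy as np

x = np.array([3.0, -4.0])
print(np.linalg.norm(x, 1))      #norma 1: |3| + |-4| = 7
print(np.linalg.norm(x, 2))      #norma 2: sqrt(3^2 + 4^2) = 5
print(np.linalg.norm(x, np.inf)) #norma infinito: max(|3|, |-4|) = 4
```

Obsérvese que $||x||_\infty \leq ||x||_2 \leq ||x||_1$, lo cual es consistente con la equivalencia entre normas que se menciona más adelante.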
Las normas anteriores pertenecen a una familia parametrizada por una constante $p, p \geq 1$ cuyo nombre es norma $\mathcal{l}_p$: $$ ||x||_p = \left(\displaystyle \sum_{i=1}^n|x_i|^p \right )^{1/p}.$$ * Un resultado para $x \in \mathbb{R}^n$ es la **equivalencia** entre normas: $$\exists \alpha, \beta > 0 \text{ tales que }: \alpha||x||_a \leq ||x||_b \leq \beta ||x||_a \forall x \in \mathbb{R}^n$$ donde: $||\cdot||_a, ||\cdot||_b$ son normas cualesquiera en $\mathbb{R}^n$. Por la propiedad anterior decimos que si se cumple convergencia en la norma $||\cdot||_a$ entonces también se cumple convergencia en la norma $||\cdot||_b$. **Ejemplos de gráficas en el plano:** Norma $2$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_2 \leq 1\}$ ``` f=lambda x: np.sqrt(x[:,0]**2 + x[:,1]**2) #definición de norma2 density=1e-5 density_p=int(2.5*10**3) x=np.arange(-1,1,density) y1=np.sqrt(1-x**2) y2=-np.sqrt(1-x**2) x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.plot(x,y1,'b',x,y2,'b') plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen ||x||_2 <= 1') plt.grid() plt.show() ``` Norma $1$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_1 \leq 1\}$ ``` f=lambda x:np.abs(x[:,0]) + np.abs(x[:,1]) #definición de norma1 density=1e-5 density_p=int(2.5*10**3) x1=np.arange(0,1,density) x2=np.arange(-1,0,density) y1=1-x1 y2=1+x2 y3=x1-1 y4=-1-x2 x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.plot(x1,y1,'b',x2,y2,'b',x1,y3,'b',x2,y4,'b') plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen ||x||_1 <= 1') plt.grid() plt.show() ``` Norma $\infty$: $\{ x \in \mathbb{R}^2 \text{ tales que } ||x||_\infty \leq 1\}$ ``` f=lambda x:np.max(np.abs(x),axis=1) #definición de norma infinito density_p=int(2.5*10**3) x_p=np.random.uniform(-1,1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen ||x||_inf <= 1') plt.grid() plt.show() ``` ->La norma $\infty$ se encuentra en esta familia como límite: $$||x||_\infty = \displaystyle \lim_{p \rightarrow \infty} ||x||_p.$$ ->En la norma $\mathcal{l}_2$ o Euclidiana $||x||_2$ tenemos una desigualdad muy importante, la desigualdad de **Cauchy-Schwartz**: $$|x^Ty| \leq ||x||_2||y||_2 \forall x,y \in \mathbb{R}^n$$ la cual relaciona el producto interno estándar para $x,y \in \mathbb{R}^n$: $<x,y> = x^Ty = \displaystyle \sum_{i=1}^nx_iy_i$ con la norma $\mathcal{l}_2$ de $x$ y la norma $\mathcal{l}_2$ de $y$. Además se utiliza lo anterior para definir el ángulo (sin signo) entre $x,y$: $$\measuredangle x,y = \cos ^{-1}\left(\frac{x^Ty}{||x||_2||y||_2} \right )$$ para $\cos^{-1}(u) \in [0,\pi]$ y se nombra a $x,y$ ortogonales si $x^Ty=0$. Obsérvese que $||x||_2 = \sqrt{x^Tx}$. * También se utilizan matrices* para definir normas *Matriz: arreglo $2$-dimensional de datos o $2$ arreglo de orden $2$. Se utiliza la notación $A \in \mathbb{R}^{m\times n}$ para denotar: $$A = \left[\begin{array}{cccc} a_{11} &a_{12}&\dots&a_{1n}\\ a_{21} &a_{22}&\dots&a_{2n}\\ \vdots &\vdots& \vdots&\vdots\\ a_{n1} &a_{n2}&\dots&a_{nn}\\ \vdots &\vdots& \vdots&\vdots\\ a_{m-11} &a_{m-12}&\dots&a_{m-1n}\\ a_{m1} &a_{m2}&\dots&a_{mm} \end{array} \right] $$ *$a_{ij} \mathbb{R} \forall i=1,\dots,m, j=1,\dots,n$. *$A=(a_1,\dots a_n), a_j \in \mathbb{R}^m (=\mathbb{R}^{m\times1}) \forall j=1,\dots,n$. 
*$A=\left ( \begin{array}{c} a_1^T\\ \vdots\\ a_m^T \end{array} \right ), a_i \in \mathbb{R}^n (=\mathbb{R}^{n\times1}) \forall i=1,\dots,m$. Entonces un ejemplo de norma-$2$ ponderada es: $\{x \in \mathbb{R}^2 \text{ tales que } ||x||_D \leq 1, ||x||_D = ||Dx||_2, \text{con matriz diagonal } D\}$: ``` d1=1/5 d2=1/3 f=lambda x: np.sqrt((d1*x[:,0])**2 + (d2*x[:,1])**2) #definición de norma2 density=1e-5 density_p=int(2.5*10**3) x=np.arange(-1/d1,1/d1,density) y1=1.0/d2*np.sqrt(1-(d1*x)**2) y2=-1.0/d2*np.sqrt(1-(d1*x)**2) x_p=np.random.uniform(-1/d1,1/d1,(density_p,2)) ind=f(x_p)<=1 x_p_subset=x_p[ind] plt.plot(x,y1,'b',x,y2,'b') plt.scatter(x_p_subset[:,0],x_p_subset[:,1],marker='.') plt.title('Puntos en el plano que cumplen ||x||_D <= 1') plt.grid() plt.show() ``` en este caso $D=\left[\begin{array}{cc} \frac{1}{5} &0\\ 0 &\frac{1}{3} \end{array}\right ]$ ## Normas matriciales La multiplicación de una matriz de tamaño $m\times n$ por un vector se define como: $$y=Ax=\displaystyle \sum_{j=1}^n \alpha_jx_j$$ con $a_j \in \mathbb{R}^m, x \in \mathbb{R}^n$. Obsérvese que $x \in \mathbb{R}^n, Ax \in \mathbb{R}^m$. **Inducidas** De las normas matriciales más importantes se encuentran las **inducidas** por normas vectoriales. Estas normas matriciales se definen en términos de los vectores en $\mathbb{R}^n$ a los que se les aplica la multiplicación $Ax$: Dadas las normas vectoriales $||\cdot||_{(n)}, ||\cdot||_{(m)}$ en $\mathbb{R}^n$ y $\mathbb{R}^m$ respectivamente, la norma matricial inducida $||A||_{(m,n)}$ para $A \in \mathbb{R}^{m \times n}$ es el **menor número** $C$ para el cual la desigualdad: $$||Ax||_{(m)} \leq C||x||_{(n)}$$ se cumple $\forall x \in \mathbb{R}^n$. Esto es: $$||A||_{(m,n)} = \displaystyle \sup_{x \in \mathbb{R}^n} \frac{||Ax||_{(m)}}{||x||_{(n)}}$$ **Comentarios:** * $||A||_{(m,n)}$ representa el **máximo** factor por el cual $A$ puede modificar el tamaño de $x$ sobre todos los vectores $x \in \mathbb{R}^n$, es una medida de un tipo de **worst case stretch factor**. * Así definidas, la norma $||\cdot||_{(m,n)}$ es la norma matricial inducida por las normas vectoriales $||\cdot||_{(m)}, ||\cdot||_{(n)}$. * Son definiciones equivalentes: $$||A||_{(m,n)} = \displaystyle \sup_{x \in \mathbb{R}^n} \frac{||Ax||_{(m)}}{||x||_{(n)}} = \displaystyle \sup_{||x||_{(n)} \leq 1} \frac{||Ax||_{(m)}}{||x||_{(n)}} = \displaystyle \sup_{||x||_{(n)}=1} ||Ax||_{(m)}$$ **Ejemplo:** La matriz $A=\left[\begin{array}{cc} 1 &2\\ 0 &2 \end{array}\right ]$ mapea $\mathbb{R}^2$ a $\mathbb{R}^2$, en particular se tiene: * $A$ mapea $e_1 = \left[\begin{array}{c} 1 \\ 0 \end{array}\right ]$ a la columna $a_1 = \left[\begin{array}{c} 1 \\ 0 \end{array}\right ]$ de $A$. * $A$ mapea $e_2 = \left[\begin{array}{c} 0 \\ 1 \end{array}\right ]$ a la columna $a_2 = \left[\begin{array}{c} 2 \\ 2 \end{array}\right ]$ de $A$. Considerando $||A||_p := ||A||_{(p,p)}$ con $p=1, p=2, p=\infty$ se tiene: <img src="https://dl.dropboxusercontent.com/s/3fqz9uspfwdurjf/normas_matriciales.png?dl=0" heigth="500" width="500"> **Comentario:** al observar la segunda gráfica se tiene la siguiente afirmación: la acción de una matriz sobre una circunferencia es una elipse con longitudes de semiejes iguales a $|d_i|$. En general la acción de una matriz sobre una hiper esfera es una hiperelipse. 
Por lo que los vectores unitarios en $\mathbb{R}^n$ que son más amplificados por la acción de una matriz diagonal $D \in \mathbb{R}^{m\times n}$ con entradas iguales a $d_i$ son aquellos que se mapean a los semiejes de una hiperelipse en $\mathbb{R}^m$ de longitud igual a $\max\{|d_i|\}$ y así tenemos: si $D$ es una matriz diagonal con entradas $|d_i|$ entonces $||D||_2 = \displaystyle \max_{i=1,\dots,m}\{|d_i|\}$. **Ejemplo con Python para la norma $1$:** ``` A=np.array([[1,2],[0,2]]) density=1e-5 x1=np.arange(0,1,density) x2=np.arange(-1,0,density) x1_y1 = np.column_stack((x1,1-x1)) x2_y2 = np.column_stack((x2,1+x2)) x1_y3 = np.column_stack((x1,x1-1)) x2_y4 = np.column_stack((x2,-1-x2)) apply_A = lambda vec : np.transpose(A@np.transpose(vec)) A_to_vector_1 = apply_A(x1_y1) A_to_vector_2 = apply_A(x2_y2) A_to_vector_3 = apply_A(x1_y3) A_to_vector_4 = apply_A(x2_y4) plt.subplot(1,2,1) plt.plot(x1_y1[:,0],x1_y1[:,1],'b', x2_y2[:,0],x2_y2[:,1],'b', x1_y3[:,0],x1_y3[:,1],'b', x2_y4[:,0],x2_y4[:,1],'b') e1 = np.column_stack((np.repeat(0,len(x1)),x1)) plt.plot(e1[:,0],e1[:,1],'g') plt.xlabel('Vectores con norma 1 menor o igual a 1') plt.grid() plt.subplot(1,2,2) plt.plot(A_to_vector_1[:,0],A_to_vector_1[:,1],'b', A_to_vector_2[:,0],A_to_vector_2[:,1],'b', A_to_vector_3[:,0],A_to_vector_3[:,1],'b', A_to_vector_4[:,0],A_to_vector_4[:,1],'b') A_to_vector_e1 = apply_A(e1) plt.plot(A_to_vector_e1[:,0],A_to_vector_e1[:,1],'g') plt.grid() plt.title('Efecto de la matriz A sobre los vectores con norma 1 menor o igual a 1') plt.show() np.linalg.norm(A,1) ``` **Ejercicio:** obtener las otras dos gráficas con Python usando norma $2$ y norma $\infty$. **Resultados computacionales que es posible probar:** 1. $||A||_1 = \displaystyle \max_{j=1,\dots,n}\sum_{i=1}^n|a_{ij}|$. 2. $||A||_\infty = \displaystyle \max_{i=1,\dots,n}\sum_{j=1}^n|a_{ij}|$. 3. $||A||_2 = \sqrt{\lambda_{max}(A^TA)} = \max \left \{\sqrt{\lambda}\in \mathbb{R} | \lambda \text{ es eigenvalor de } A^TA \right \} = max \left \{ \sigma \in \mathbb{R} | \sigma \text{ es valor singular de A } \right \} = \sigma_{max}(A)$. por ejemplo para la matriz anterior se tiene: ``` np.linalg.norm(A,2) _,s,_ = np.linalg.svd(A) np.max(s) ``` **Otras normas matriciales:** * Norma de Frobenius: $||A||_F = \text{tr}(A^TA)^{1/2} = \left ( \displaystyle \sum_{i=1}^m \sum_{j=1}^n a_{ij}^2 \right ) ^{1/2}$. * Norma "sum-absolute-value": $||A||_{sav} = \displaystyle \sum_{i=1}^m \sum_{j=1}^n |a_{ij}|$. * Norma "max-absolute-value": $||A||_{mav} = \displaystyle \max \left\{|a_{ij}| \text{ para } i=1,\dots,m , j=1,\dots,n \right \}$. **Comentarios:** * El producto interno estándar en $\mathbb{R}^{m\times n}$ es: $<A,B> = tr(A^TB) = \displaystyle \sum_{i=1}^m \sum_{j=1}^n a_{ij}b_{ij}$. * La norma $2$ (también llamada norma espectral o $\mathcal{l}_2$) y la norma de Frobenius cumplen la propiedad de **consistencia**: $$||Ax|| \leq ||A|| ||x|| \forall x \in \mathbb{R}^n, \forall A \in \mathbb{R}^{m\times n}.$$ $$||AB|| \leq ||A|| ||B|| \forall A,B \text{ matrices con dimensiones correspondientes para su multiplicación}.$$ **Obs:** de hecho esta propiedad de consistencia también es cumplida por las normas-$p$ matriciales. ## Nota sobre $\sup$ Si $C \subseteq \mathbb{R}$ entonces $a \subseteq \mathbb{R}$ es una **cota superior** en $C$ si $$ x \leq a, \forall x \in C.$$ En $\mathbb{R}$ el conjunto de cotas superiores es $\emptyset, \mathbb{R}$ ó un intervalo de la forma $[b,\infty]$. 
En el último caso, $b$ se llama **mínima cota superior o supremo del conjunto** $C$ y se denota $\sup C$. Por convención $\sup\emptyset = -\infty$ y $\sup C=\infty$ si $C$ no es acotado por arriba. **Obs:** si $C$ es finito, $\sup C$ es el máximo de los elementos de $C$ y típicamente se denota como $\max C$. Análogamente, $a \in \mathbb{R}$ es una **cota inferior** en $C \subseteq \mathbb{R}$ si $$a \leq x, \forall x \in C.$$ El **ínfimo o máxima cota inferior** de $C$ es $\inf C = -\sup (-C)$. Por convención $\inf \emptyset = \infty$ y si $C$ no es acotado por debajo entonces $\inf C = -\infty$. **Obs:** si $C$ es finito, $\inf C$ es el mínimo de sus elementos y se denota como $\min C$. **Ejercicios** 1. Resuelve los ejercicios y preguntas de la nota. **Preguntas de comprehensión** 1)¿Qué factores influyen en la falta de exactitud de un cálculo? 2)Menciona $5$ propiedades que un conjunto debe cumplir para que sea considerado un espacio vectorial. 3)Menciona las propiedades que debe cumplir una función para que se considere una norma. 4)¿Qué es una norma matricial inducida?, ¿qué mide una norma matricial inducida? 5)¿La norma de Frobenius, es una norma matricial inducida? 6)¿A qué son iguales $\text{sup}(\emptyset)$, $\text{inf}(\emptyset)$ ? (el conjunto $\emptyset$ es el conjunto vacío) 7)Si f es un problema mal condicionado, ¿a qué nos referimos? Da ejemplos de problemas bien y mal condicionados. 8)Si f es un problema que resolvemos con un algoritmo g, ¿qué significa: a. que g sea estable? b. que g sea estable hacia atrás? c. que g sea inestable? 9)¿Qué ventaja(s) se tiene(n) al calcular un error hacia atrás vs calcular un error hacia delante? **Referencias** 1. Nota [1.2.Sistema_de_punto_flotante](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.2.Sistema_de_punto_flotante.ipynb) 2. L. Trefethen, D. Bau, Numerical linear algebra, SIAM, 1997. 3. G. H. Golub, C. F. Van Loan,Matrix Computations. John Hopkins University Press, 2013
# Probability and Statistics

```
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
```

The next topic should really be this chapter's main focus, Monte Carlo integration. But since Monte Carlo is a method grounded in probability and statistics, we first review some directly related material here.

## Random Variables, Expectation, and Variance

- Cumulative Distribution Function: $F(x) = P(X \leq x)$
- $P(X > a) = 1-P(X \leq a) = 1-F(a)$
- Interval Probabilities: $P(a < X \leq b) = F(b)-F(a)$
- Linearity of Expectation: $E(aX+b) = aE(X)+b$
- $E(X+Y) = E(X)+E(Y)$ , $E(X-Y) = E(X)-E(Y)$
- Variance: $V(X+Y) = V(X)+V(Y)+2(E(XY)-EX \cdot EY)$
- Variance: Expected squared difference between X and its mean
    - $V(X) = E[(X-\mu)^2]$
    - $V(X)=E[X^2]-E[X]^2$
- Standard deviation: $\sigma_X = +\sqrt{V(X)}$
- Addition: $V(X+b) = V(X)$
- Scaling: $V(aX) = a^2 V(X)$, $\sigma_{aX} = |a| \sigma_X$
- Affine Transformation: $V(aX+b)=a^2 V(X)$

## Distribution Families

### Discrete Distribution Families

| Distribution | Notation | PMF | Mean: $\mu$ | Variance | CDF: $F(n)$ |
| -- | -- | -- | -- | -- | -- |
| Bernoulli | $$X \sim B_p$$ | $$p^k q^{1-k}, \ k \in \{0,1\}$$ | $$p$$ | $$pq$$| |
| Binomial | $$X \sim B_{p,n}(k)$$ | $$\binom{n}{k} p^k q^{n-k}$$ | $$np$$ | $$npq$$ | |
| Poisson | $$X \sim P_{\lambda}(k)$$ | $$e^{-\lambda} \frac{{\lambda}^k}{k!}$$ | $$\lambda$$ | $$\lambda$$ | |
| Geometric | $$X \sim G_p(n)$$ | $$p \cdot q^{n-1}$$ | $$\frac{1}{p}$$ | $$\frac{q}{p^2}$$ | $$1-q^n$$ |

### Continuous Distribution Families

| Distribution | Notation | PDF | Mean: $\mu$ | Variance | CDF: $F(x)$ |
| -- | -- | -- | -- | -- | -- |
| Uniform | $$X \sim U_{[a,b]}$$ | $$\frac{1}{b-a}$$ | $$\frac{a+b}{2}$$ | $$\frac{(b-a)^2}{12}$$ | $$\frac{x-a}{b-a}$$ |
| Exponential | $$X \sim f_{\lambda}$$ | $$\lambda e^{-\lambda x}, x \geq 0 $$ | $$\frac{1}{\lambda}$$ | $$\frac{1}{\lambda^2}$$ | $$1-e^{-\lambda x}, x \geq 0$$ |

### Normal Distribution

- Standard normal distribution: $Z \sim \mathcal{N}(0, 1)$

$$
Z \sim \mathcal{N}(0, 1) \\
P(Z \leq a) = \Phi(a) \\
P(Z \geq a) = 1-\Phi(a) \\
P(a \le Z \le b) = \Phi(b) - \Phi(a)
$$

- General normal distribution: $X\sim\mathcal{N}(\mu,\sigma^2)$

$$
f_X(x) = \frac{1}{\sqrt{2\pi\sigma^2}} e ^{-(x-\mu)^2/(2 \sigma^2)}
$$

```
x = np.linspace(-4,4,100)
y = norm.pdf(x)
plt.figure(figsize=[9,9])
plt.title("Normal Distribution")
plt.plot(x,y)
plt.grid()
plt.show()
```

## Law of Large Numbers

- For $N$ independent random variables, as the sample size goes to infinity, the sample mean "converges in probability" to $\mu$.

## Statistics and Parameter Estimation

- Sample mean: $\overline{X} = \frac{1}{n} \sum_{i=1}^n X_i$
- Unbiased Variance Estimation with Bessel's Correction: $S^2 = \frac{1}{n-1} \sum_{i=1}^n (X_i - \overline{X})^2$
- $V(\overline{X}) = \frac{\sigma^2}{n} \qquad \sigma_{\overline{X}}=\frac{\sigma}{\sqrt{n}}$
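As a quick numerical check of the last two sections, here is a small simulation sketch; the distribution, parameters, and sample sizes are arbitrary choices for illustration. The sample mean approaches $\mu$ and its standard deviation shrinks like $\sigma/\sqrt{n}$:

```
import numpy as np

rng = np.random.default_rng(0)
mu, sigma = 3.0, 2.0

for n in (10, 1000, 100000):
    samples = rng.normal(mu, sigma, size=n)
    x_bar = samples.mean()    # sample mean, should approach mu
    s2 = samples.var(ddof=1)  # unbiased variance estimate (Bessel's correction)
    print(n, x_bar, s2, sigma/np.sqrt(n))  # last column: std of the sample mean
```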
# The Power of Plotly: A Better Approach to Python Data Visualization

Visualization is essential to communicating results in data science. While it takes years of studying and training to understand standard deviations and cross-validation, anyone can easily comprehend a chart or graph.

In this post, I'll compare Matplotlib, the bread-and-butter data visualization package in Python, with Plotly, an interactive one. By visualizing the same dataset with both, I'll argue that Plotly is a superior alternative.

## The Matplotlib Version

Let's say we want to plot the locations of seismographic stations in the Himalayas. After much searching and consternation, we find a dataset at [TODO: Link to original source], which has four attributes for each station:

1. Latitude
2. Longitude
3. Elevation
4. Name (stored as a 4-character code)

The most natural approach is a 2D scatterplot where each station is plotted at its longitude and latitude. We can use a colorbar to represent elevation.

```
'''Importing packages.'''
import matplotlib.pyplot as plt
from matplotlib import cm
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style("whitegrid")

'''Reading the data. Typically we would insert a header row in a pandas
dataframe to index data properly, but for visualization purposes we'll just
extract everything and store each attribute as a list.
This is not good practice!'''
station_data = pd.read_csv('data/stations.txt', sep="\t", skiprows=1, header=None)
all_lat = station_data.iloc[:][1]
all_long = station_data.iloc[:][2]
all_elev = np.array(station_data.iloc[:][3])
all_names = station_data.iloc[:][0]

'''Scatter and annotate the data by station name. Each point is drawn at a
fixed size (s=100); elevation is encoded by the colormap.'''
fig, ax = plt.subplots(figsize=[15, 12])
sc = ax.scatter(all_lat, all_long, s=100, c=all_elev, cmap = cm.viridis)
for i in range(len(all_names)):
    ax.annotate(all_names[i], (all_lat[i], all_long[i]))

'''Adding a colorbar.'''
cbar = fig.colorbar(sc, ax=ax)
cbar.set_label('Elevation (km)', fontsize=20)

'''Labelling and saving the figure.'''
ax.set_xlabel('Latitude', fontsize=20)
ax.set_ylabel('Longitude', fontsize=20)
ax.set_title('Seismographic Stations in the Himalayas', fontsize=25)
plt.savefig('2D_matplotlib.png')
fig.show()
```

This is okay, but not great. The colorbar scheme conveys only a vague sense of the elevation of each station, since it's hard to get a sense of whether a dot is purple-blue or midnight-blue or just blue.

## The 3D Matplotlib Version

To make the elevation of each point clearer, let's say we try a 3D plot. We'll use the same axes as before, but now add an elevation axis for each point.
```
from mpl_toolkits.mplot3d import Axes3D

fig = plt.figure(figsize=[15, 12])
ax = fig.add_subplot(111, projection='3d')
sc3d = ax.scatter3D(all_lat, all_long, all_elev, s=100, c=all_elev, cmap=cm.viridis)

# Label each station at its full 3D position; ax.annotate places text in the
# 2D projected coordinates, so ax.text with (x, y, z) is used instead.
for i in range(len(all_names)):
    ax.text(all_lat[i], all_long[i], all_elev[i], all_names[i])

'''Adding a colorbar. Note that it must be built from this figure's scatter
handle, not the handle left over from the previous 2D plot.'''
cbar = fig.colorbar(sc3d, ax=ax)
cbar.set_label('Elevation (km)', fontsize=20)

'''Labelling the figure.'''
ax.set_xlabel('Latitude', fontsize=20)
ax.set_ylabel('Longitude', fontsize=20)
ax.set_zlabel('Elevation (km)', fontsize=20)
ax.set_title('Seismographic Stations in the Himalayas', fontsize=25)
fig.show()
```

This does a better job of conveying the elevation of each point, but at the cost of distorting the latitude-longitude relationships of the stations. When creating a 3D plot on a 2D screen, such tradeoffs are unavoidable. For the sake of stubbornness, let's try some more 3D plot styles. Here's a trisurf, which "connects the dots" to create a surface in 3D space.

```
fig = plt.figure(figsize=[15, 12])
ax = fig.add_subplot(111, projection='3d')
ax.plot_trisurf(all_lat, all_long, all_elev)

'''Labelling the figure.'''
ax.set_xlabel('Latitude', fontsize=20)
ax.set_ylabel('Longitude', fontsize=20)
ax.set_zlabel('Altitude (km)', fontsize=20)
ax.set_title('Seismographic Stations in the Himalayas', fontsize=25)
fig.show()
```

Oof. How about a [TODO: OTHER STYLE]?

## Plotly, At Long Last

Our forays into 3D plotting don't seem to be going anywhere. Let's return to the original question: how do we best plot stations by latitude, longitude, and altitude? Our very first 2D plot got most of the way there. The only issue was that the representation of altitude was not precise enough, since it relied on the colorbar gradient. Plotly offers an easy solution in the form of an interactive plot. We can make the exact same scatterplot as before, but this time hovering over a scatterpoint reveals its exact altitude and name.

```
import plotly.plotly as py
import plotly.graph_objs as go

'''Construct labels for the scatterplot. 
This is the text that shows up when hovering over a particular point.'''
scatter_labels = []
for a, b in zip(all_elev, all_names):
    scatter_labels.append('Station: {}<br>Elevation: {} km '.format(b, a)) # <br> is a linebreak in Plotly

trace0 = go.Scatter(
    x = all_lat,
    y = all_long,
    mode = 'markers',
    name = 'markers',
    marker=dict(
        size='16',
        color = all_elev,
        colorscale='Viridis',
        showscale=True #[TODO: ADD TITLE TO COLORBAR]
    ),
    text = scatter_labels
)

scatter_layout = go.Layout(
    title= 'Seismographic Stations in the Himalayas',
    hovermode= 'closest',
    xaxis= dict(
        title= 'Latitude',
        ticklen= 5,
        zeroline= False,
        gridwidth= 2,
    ),
    yaxis=dict(
        title= 'Longitude',
        ticklen= 5,
        gridwidth= 2,
    ),
    showlegend= False
)

fig = go.Figure(data=[trace0], layout=scatter_layout)
py.iplot(fig, filename='himalayas')
```

Huzzah! This plot conveys the exact same information as the original Matplotlib version, with some obvious benefits.

* Viewers can see the exact longitude, latitude, and elevation values when hovering over a point.
* The hovering option obviates the need to plot names of stations next to the points, giving the plot a cleaner look.
* The plot itself appears sharper and nicer looking (okay, maybe I'm biased on this one).

What about 3D?

```
import plotly.plotly as py
import plotly.graph_objs as go

'''Construct labels for the scatterplot. 
This is the text that shows up when hovering over a particular point.''' scatter_labels = [] for a, b in zip(all_elev, all_names): scatter_labels.append('Station: {}<br>Elevation: {} km '.format(b, a)) #<br> is a linebreak in Plotly trace0 = go.Scatter3d( x = all_lat, y = all_long, z = all_elev, mode = 'markers', name = 'markers', marker=dict( size='8', color = all_elev, symbol='circle', colorscale='Viridis', showscale=True #[TODO: ADD TITLE TO COLORBAR] ), text = scatter_labels ) scatter_layout = go.Layout( title= 'Seismographic Stations in the Himalayas', hovermode= 'closest', xaxis= dict( title= 'Latitude', ticklen= 5, zeroline= False, gridwidth= 2, ), yaxis=dict( title= 'Longitude', ticklen= 5, gridwidth= 2, ), # zaxis=dict( TODO ADD Z-AXIS LABEL # title= 'Altitude (km)', # ticklen= 5, # gridwidth= 2, # ), showlegend= False ) fig = go.Figure(data=[trace0], layout=scatter_layout) py.iplot(fig, filename='himalayas-3d') ``` # Conclusion For quick and easy data exploration, Matplotlib gets the job done. But when making data visualizations for others, it pays to go the extra mile and create interactive visualizations. These are the best of both worlds: you can convey more information while reducing clutter. Thanks for reading! If you liked this post, please share any thoughts or comments below. [LinkedIn](https://www.linkedin.com/in/akhil-jalan-125b32103/) [Github](https://github.com/akhiljalan) ``` import plotly.plotly as py import plotly.graph_objs as go '''Construct labels for the scatterplot. This is the text that shows up when hovering over a particular point.''' scatter_labels = [] for a, b in zip(all_elev, all_names): scatter_labels.append('Station: {}<br>Elevation: {} km '.format(b, a)) #<br> is a linebreak in Plotly trace0 = go.Scatter( x = all_lat, y = all_long, mode = 'markers', name = 'markers', marker=dict( size='16', color = all_elev, colorscale='Viridis', showscale=True #[TODO: ADD TITLE TO COLORBAR] ), text = scatter_labels ) scatter_layout = go.Layout( title= 'Seismographic Stations in the Himalayas', hovermode= 'closest', xaxis= dict( title= 'Latitude', ticklen= 5, zeroline= False, gridwidth= 2, ), yaxis=dict( title= 'Longitude', ticklen= 5, gridwidth= 2, ), showlegend= False ) fig = go.Figure(data=[trace0], layout=scatter_layout) py.iplot(fig, filename='himalayas') ```
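One loose end from the 3D Plotly figure above: the commented-out `zaxis` block never takes effect, because Plotly places 3D axis configuration under the layout's `scene` attribute rather than at the top level of the layout. Below is a minimal sketch of how the three axis titles could be attached, written against the same legacy `plotly.graph_objs` API used in this post; treat the exact attribute names as an assumption to verify against your Plotly version.

```
import plotly.graph_objs as go

# 3D axes are configured through layout.scene, not through top-level
# xaxis/yaxis/zaxis entries, which is why the zaxis block above was ignored.
scatter_layout_3d = go.Layout(
    title='Seismographic Stations in the Himalayas',
    hovermode='closest',
    showlegend=False,
    scene=dict(
        xaxis=dict(title='Latitude'),
        yaxis=dict(title='Longitude'),
        zaxis=dict(title='Altitude (km)'),
    ),
)

# Reusing the Scatter3d trace built above:
# fig = go.Figure(data=[trace0], layout=scatter_layout_3d)
# py.iplot(fig, filename='himalayas-3d')
```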
# 深度概率编程库 MindSpore深度概率编程的目标是将深度学习和贝叶斯学习结合,包括概率分布、概率分布映射、深度概率网络、概率推断算法、贝叶斯层、贝叶斯转换和贝叶斯工具箱,面向不同的开发者。对于专业的贝叶斯学习用户,提供概率采样、推理算法和模型构建库;另一方面,为不熟悉贝叶斯深度学习的用户提供了高级的API,从而不用更改深度学习编程逻辑,即可利用贝叶斯模型。 ## 概率分布 概率分布(`mindspore.nn.probability.distribution`)是概率编程的基础。`Distribution` 类提供多样的概率统计接口,例如概率密度函数 *pdf* 、累积密度函数 *cdf* 、散度计算 *kl_loss* 、抽样 *sample* 等。现有的概率分布实例包括高斯分布,伯努利分布,指数型分布,几何分布和均匀分布。 ### 概率分布类 - `Distribution`:所有概率分布的基类。 - `Bernoulli`:伯努利分布。参数为试验成功的概率。 - `Exponential`:指数型分布。参数为率参数。 - `Geometric`:几何分布。参数为一次伯努利试验成功的概率。 - `Normal`:正态(高斯)分布。参数为均值和标准差。 - `Uniform`:均匀分布。参数为数轴上的最小值和最大值。 - `Categorical`:类别分布。每种类别出现的概率。 - `LogNormal`:对数正态分布。参数为位置参数和规模参数。 - `Gumbel`: 耿贝尔极值分布。参数为位置参数和规模参数。 - `Logistic`:逻辑斯谛分布。参数为位置参数和规模参数。 - `Cauchy`:柯西分布。参数为位置参数和规模参数。 #### Distribution基类 `Distribution` 是所有概率分布的基类。 接口介绍:`Distribution` 类支持的函数包括 `prob`、`log_prob`、`cdf`、`log_cdf`、`survival_function`、`log_survival`、`mean`、`sd`、`var`、`entropy`、`kl_loss`、`cross_entropy` 和 `sample` 。分布不同,所需传入的参数也不同。只有在派生类中才能使用,由派生类的函数实现决定参数。 - `prob` :概率密度函数(PDF)/ 概率质量函数(PMF)。 - `log_prob` :对数似然函数。 - `cdf` :累积分布函数(CDF)。 - `log_cdf` :对数累积分布函数。 - `survival_function` :生存函数。 - `log_survival` :对数生存函数。 - `mean` :均值。 - `sd` :标准差。 - `var` :方差。 - `entropy` :熵。 - `kl_loss` :Kullback-Leibler 散度。 - `cross_entropy` :两个概率分布的交叉熵。 - `sample` :概率分布的随机抽样。 - `get_dist_args` :概率分布在网络中使用的参数。 - `get_dist_type` :概率分布的类型。 #### 伯努利分布(Bernoulli) 伯努利分布,继承自 `Distribution` 类。 属性: - `Bernoulli.probs`:返回伯努利试验成功的概率,类型为`Tensor`。 `Distribution` 基类调用 `Bernoulli` 中私有接口以实现基类中的公有接口。`Bernoulli` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入 试验成功的概率 *probs1* 。 - `entropy`:可选择传入 试验成功的概率 *probs1* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *probs1_b* 。*dist* 为另一分布的类型,目前只支持此处为 *‘Bernoulli’* 。 *probs1_b* 为分布 *b* 的试验成功概率。可选择传入分布 *a* 的参数 *probs1_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入试验成功的概率 *probs* 。 - `sample`:可选择传入样本形状 *shape* 和试验成功的概率 *probs1* 。 - `get_dist_args` :可选择传入试验成功的概率 *probs*。 - `get_dist_type` :返回 *‘Bernoulli’* 。 #### 指数分布(Exponential) 指数分布,继承自 `Distribution` 类。 属性: - `Exponential.rate`:返回分布的率参数,类型为`Tensor`。 `Distribution` 基类调用 `Exponential` 私有接口以实现基类中的公有接口。`Exponential` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入率参数 *rate* 。 - `entropy`:可选择传入率参数 *rate* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *rate_b* 。 *dist* 为另一分布的类型的名称, 目前只支持此处为 *‘Exponential’* 。*rate_b* 为分布 *b* 的率参数。可选择传入分布 *a* 的参数 *rate_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入率参数 *rate* 。 - `sample`:可选择传入样本形状 *shape* 和率参数 *rate* 。 - `get_dist_args` :可选择传入率参数 *rate* 。 - `get_dist_type` :返回 *‘Exponential’* 。 #### 几何分布(Geometric) 几何分布,继承自 `Distribution` 类。 属性: - `Geometric.probs`:返回伯努利试验成功的概率,类型为`Tensor`。 `Distribution` 基类调用 `Geometric` 中私有接口以实现基类中的公有接口。`Geometric` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入试验成功的概率 *probs1* 。 - `entropy`:可选择传入 试验成功的概率 *probs1* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* 和 *probs1_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Geometric’* 。 *probs1_b* 为分布 *b* 的试验成功概率。可选择传入分布 *a* 的参数 *probs1_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入试验成功的概率 *probs1* 。 - `sample`:可选择传入样本形状 *shape* 和试验成功的概率 *probs1* 。 - `get_dist_args` :可选择传入试验成功的概率 *probs1* 。 - `get_dist_type` :返回 *‘Geometric’* 。 #### 正态分布(Normal) 正态(高斯)分布,继承自 `Distribution` 类。 `Distribution` 基类调用 `Normal` 中私有接口以实现基类中的公有接口。`Normal` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入分布的参数均值 *mean* 和标准差 *sd* 。 - `entropy`:可选择传入分布的参数均值 *mean* 和标准差 *sd* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*mean_b* 和 *sd_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 
*‘Normal’* 。*mean_b* 和 *sd_b* 为分布 *b* 的均值和标准差。可选择传入分布的参数 *a* 均值 *mean_a* 和标准差 *sd_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择分布的参数包括均值 *mean_a* 和标准差 *sd_a* 。 - `sample`:可选择传入样本形状 *shape* 和分布的参数包括均值 *mean_a* 和标准差 *sd_a* 。 - `get_dist_args` :可选择传入分布的参数均值 *mean* 和标准差 *sd* 。 - `get_dist_type` :返回 *‘Normal’* 。 #### 均匀分布(Uniform) 均匀分布,继承自 `Distribution` 类。 属性: - `Uniform.low`:返回分布的最小值,类型为`Tensor`。 - `Uniform.high`:返回分布的最大值,类型为`Tensor`。 `Distribution` 基类调用 `Uniform` 以实现基类中的公有接口。`Uniform` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入分布的参数最大值 *high* 和最小值 *low* 。 - `entropy`:可选择传入分布的参数最大值 *high* 和最小值 *low* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*high_b* 和 *low_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Uniform’* 。 *high_b* 和 *low_b* 为分布 *b* 的参数。可选择传入分布 *a* 的参数即最大值 *high_a* 和最小值 *low_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入分布的参数最大值 *high* 和最小值 *low* 。 - `sample`:可选择传入 *shape* 和分布的参数即最大值 *high* 和最小值 *low* 。 - `get_dist_args` :可选择传入分布的参数最大值 *high* 和最小值 *low* 。 - `get_dist_type` :返回 *‘Uniform’* 。 #### 多类别分布(Categorical) 多类别分布,继承自 `Distribution` 类。 属性: - `Categorical.probs`:返回各种类别的概率,类型为`Tensor`。 `Distribution` 基类调用 `Categorical` 以实现基类中的公有接口。`Categorical` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入分布的参数类别概率 *probs*。 - `entropy`:可选择传入分布的参数类别概率 *probs* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*probs_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Categorical’* 。 *probs_b* 为分布 *b* 的参数。可选择传入分布 *a* 的参数即 *probs_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入分布的参数类别概率 *probs* 。 - `sample`:可选择传入 *shape* 和类别概率 *probs* 。 - `get_dist_args` :可选择传入分布的参数类别概率 *probs* 。 - `get_dist_type` :返回 *‘Categorical’* 。 #### 对数正态分布(LogNormal) 对数正态分布,继承自 `TransformedDistribution` 类,由 `Exp` Bijector 和 `Normal` Distribution 构成。 属性: - `LogNormal.loc`:返回分布的位置参数,类型为`Tensor`。 - `LogNormal.scale`:返回分布的规模参数,类型为`Tensor`。 `Distribution` 基类调用 `LogNormal`及 `TransformedDistribution` 中私有接口以实现基类中的公有接口。`LogNormal` 支持的公有接口为: - `mean`,`mode`,`var`:可选择传入分布的位置参数*loc*和规模参数*scale* 。 - `entropy`:可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*loc_b* 和 *scale_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘LogNormal’* 。*loc_b* 和 *scale_b* 为分布 *b* 的均值和标准差。可选择传入分布的参数 *a* 均值 *loc_a* 和标准差 *sclae_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择分布的参数包括均值 *loc_a* 和标准差 *scale_a* 。`Distribution` 基类调用 `TransformedDistribution`私有接口。 - `sample`:可选择传入样本形状 *shape* 和分布的参数包括均值 *loc_a* 和标准差 *scale_a* 。`Distribution` 基类调用 `TransformedDistribution`私有接口。 - `get_dist_args` :可选择传入分布的位置参数 *loc* 和规模参数*scale* 。 - `get_dist_type` :返回 *‘LogNormal’* 。 #### 柯西分布(Cauchy) 柯西分布,继承自 `Distribution` 类。 属性: - `Cauchy.loc`:返回分布的位置参数,类型为`Tensor`。 - `Cauchy.scale`:返回分布的规模参数,类型为`Tensor`。 `Distribution` 基类调用 `Cauchy` 中私有接口以实现基类中的公有接口。`Cauchy` 支持的公有接口为: - `entropy`:可选择传入分布的位置参数*loc*和规模参数*scale*。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*loc_b* 和 *scale_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Cauchy’* 。*loc_b* 和 *scale_b* 为分布 *b* 的位置参数和规模参数。可选择传入分布的参数 *a* 位置 *loc_a* 和规模 *scale_a* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `sample`:可选择传入样本形状 *shape* 和分布的参数包括分布的位置参数 *loc* 和规模参数 *scale* 。 - `get_dist_args` :可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `get_dist_type` :返回 *‘Cauchy’* 。 #### 耿贝尔极值分布(Gumbel) 耿贝尔极值分布,继承自 `TransformedDistribution` 类,由 `GumbelCDF` Bijector和 `Uniform` Distribution 构成。 属性: - `Gumbel.loc`:返回分布的位置参数,类型为`Tensor`。 - 
`Gumbel.scale`:返回分布的规模参数,类型为`Tensor`。 `Distribution` 基类调用 `Gumbel` 中私有接口以实现基类中的公有接口。`Gumbel` 支持的公有接口为: - `mean`,`mode`,`sd`:无参数 。 - `entropy`:无参数 。 - `cross_entropy`,`kl_loss`:必须传入 *dist* ,*loc_b* 和 *scale_b* 。*dist* 为另一分布的类型的名称,目前只支持此处为 *‘Gumbel’* 。*loc_b* 和 *scale_b* 为分布 *b* 的位置参数和规模参数。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。 - `sample`:可选择传入样本形状 *shape* 。 - `get_dist_args` :可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `get_dist_type` :返回 *‘Gumbel’* 。 #### 逻辑斯谛分布(Logistic) 逻辑斯谛分布,继承自 `Distribution` 类。 属性: - `Logistic.loc`:返回分布的位置参数,类型为`Tensor`。 - `Logistic.scale`:返回分布的规模参数,类型为`Tensor`。 `Distribution` 基类调用 `logistic` 中私有接口以实现基类中的公有接口。`Logistic` 支持的公有接口为: - `mean`,`mode`,`sd`:可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `entropy`:可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `prob`,`log_prob`,`cdf`,`log_cdf`,`survival_function`,`log_survival`:必须传入 *value* 。可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `sample`:可选择传入样本形状 *shape* 和分布的参数包括分布的位置参数 *loc* 和规模参数 *scale* 。 - `get_dist_args` :可选择传入分布的位置参数 *loc* 和规模参数 *scale* 。 - `get_dist_type` :返回 *‘Logistic’* 。 ### 概率分布类在PyNative模式下的应用 `Distribution` 子类可在 **PyNative** 模式下使用。 以 `Normal` 为例, 创建一个均值为0.0、标准差为1.0的正态分布,然后计算相关函数。 ``` from mindspore import Tensor from mindspore import dtype as mstype import mindspore.context as context import mindspore.nn.probability.distribution as msd context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU") my_normal = msd.Normal(0.0, 1.0, dtype=mstype.float32) mean = my_normal.mean() var = my_normal.var() entropy = my_normal.entropy() value = Tensor([-0.5, 0.0, 0.5], dtype=mstype.float32) prob = my_normal.prob(value) cdf = my_normal.cdf(value) mean_b = Tensor(1.0, dtype=mstype.float32) sd_b = Tensor(2.0, dtype=mstype.float32) kl = my_normal.kl_loss('Normal', mean_b, sd_b) print("mean: ", mean) print("var: ", var) print("entropy: ", entropy) print("prob: ", prob) print("cdf: ", cdf) print("kl: ", kl) ``` ### 概率分布类在图模式下的应用 在图模式下,`Distribution` 子类可用在网络中。 ``` import mindspore.nn as nn from mindspore import Tensor from mindspore import dtype as mstype import mindspore.context as context import mindspore.nn.probability.distribution as msd context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() self.normal = msd.Normal(0.0, 1.0, dtype=mstype.float32) def construct(self, value, mean, sd): pdf = self.normal.prob(value) kl = self.normal.kl_loss("Normal", mean, sd) return pdf, kl net = Net() value = Tensor([-0.5, 0.0, 0.5], dtype=mstype.float32) mean = Tensor(1.0, dtype=mstype.float32) sd = Tensor(1.0, dtype=mstype.float32) pdf, kl = net(value, mean, sd) print("pdf: ", pdf) print("kl: ", kl) ``` ### TransformedDistribution类接口设计 `TransformedDistribution` 继承自 `Distribution` ,是可通过映射f(x)变化得到的数学分布的基类。其接口包括: 1. 属性 - `bijector`:返回分布的变换方法。 - `distribution`:返回原始分布。 - `is_linear_transformation`:返回线性变换标志。 2. 
接口函数(以下接口函数的参数与构造函数中 `distribution` 的对应接口的参数相同)。 - `cdf`:累积分布函数(CDF)。 - `log_cdf`:对数累积分布函数。 - `survival_function`:生存函数。 - `log_survival`:对数生存函数。 - `prob`:概率密度函数(PDF)/ 概率质量函数(PMF)。 - `log_prob`:对数似然函数。 - `sample`:随机取样。 - `mean`:无参数。只有当 `Bijector.is_constant_jacobian=true` 时可调用。 ### PyNative模式下调用TransformedDistribution实例 `TransformedDistribution` 子类可在 **PyNative** 模式下使用。 这里构造一个 `TransformedDistribution` 实例,使用 `Normal` 分布作为需要变换的分布类,使用 `Exp` 作为映射变换,可以生成 `LogNormal` 分布。 ``` import numpy as np import mindspore.nn as nn import mindspore.nn.probability.bijector as msb import mindspore.nn.probability.distribution as msd import mindspore.context as context from mindspore import Tensor, dtype context.set_context(mode=context.PYNATIVE_MODE) normal = msd.Normal(0.0, 1.0, dtype=dtype.float32) exp = msb.Exp() LogNormal = msd.TransformedDistribution(exp, normal, seed=0, name="LogNormal") # compute cumulative distribution function x = np.array([2.0, 5.0, 10.0], dtype=np.float32) tx = Tensor(x, dtype=dtype.float32) cdf = LogNormal.cdf(tx) # generate samples from the distribution shape = ((3, 2)) sample = LogNormal.sample(shape) # get information of the distribution print(LogNormal) # get information of the underlying distribution and the bijector separately print("underlying distribution:\n", LogNormal.distribution) print("bijector:\n", LogNormal.bijector) # get the computation results print("cdf:\n", cdf) print("sample:\n", sample) ``` 当构造 `TransformedDistribution` 映射变换的 `is_constant_jacobian = true` 时(如 `ScalarAffine`),构造的 `TransformedDistribution` 实例可以使用直接使用 `mean` 接口计算均值,例如: ``` normal = msd.Normal(0.0, 1.0, dtype=dtype.float32) scalaraffine = msb.ScalarAffine(1.0, 2.0) trans_dist = msd.TransformedDistribution(scalaraffine, normal, seed=0) mean = trans_dist.mean() print(mean) ``` ### 图模式下调用TransformedDistribution实例 在图模式下,`TransformedDistribution` 类可用在网络中。 ``` import numpy as np import mindspore.nn as nn from mindspore import Tensor, dtype import mindspore.context as context import mindspore.nn.probability.bijector as msb import mindspore.nn.probability.distribution as msd context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self, shape, dtype=dtype.float32, seed=0, name='transformed_distribution'): super(Net, self).__init__() # create TransformedDistribution distribution self.exp = msb.Exp() self.normal = msd.Normal(0.0, 1.0, dtype=dtype) self.lognormal = msd.TransformedDistribution(self.exp, self.normal, seed=seed, name=name) self.shape = shape def construct(self, value): cdf = self.lognormal.cdf(value) sample = self.lognormal.sample(self.shape) return cdf, sample shape = (2, 3) net = Net(shape=shape, name="LogNormal") x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32) tx = Tensor(x, dtype=dtype.float32) cdf, sample = net(tx) print("cdf: ", cdf) print("sample: ", sample) ``` ## 概率分布映射 Bijector(`mindspore.nn.probability.bijector`)是概率编程的基本组成部分。Bijector描述了一种随机变量的变换方法,可以通过一个已有的随机变量X和一个映射函数f生成一个新的随机变量$Y = f(x)$。 `Bijector` 提供了映射相关的四种变换方法。它可以当做算子直接使用,也可以作用在某个随机变量 `Distribution` 类实例上生成新的随机变量的 `Distribution` 类实例。 ### Bijector类接口设计 #### Bijector基类 `Bijector` 类是所有概率分布映射的基类。其接口包括: 1. 属性 - `name`:返回 `name` 的值。 - `is_dtype`:返回 `dtype` 的值。 - `parameter`:返回 `parameter` 的值。 - `is_constant_jacobian`:返回 `is_constant_jacobian` 的值。 - `is_injective`:返回 `is_injective` 的值。 2. 
映射函数 - `forward`:正向映射,创建派生类后由派生类的 `_forward` 决定参数。 - `inverse`:反向映射,创建派生类后由派生类的 `_inverse` 决定参数。 - `forward_log_jacobian`:正向映射的导数的对数,创建派生类后由派生类的 `_forward_log_jacobian` 决定参数。 - `inverse_log_jacobian`:反向映射的导数的对数,创建派生类后由派生类的 `_inverse_log_jacobian` 决定参数。 `Bijector` 作为函数调用:输入是一个 `Distribution` 类:生成一个 `TransformedDistribution` **(不可在图内调用)**。 #### 幂函数变换映射(PowerTransform) `PowerTransform` 做如下变量替换:`Y = g(X) = {(1 + X * power)}^{1 / power}`。其接口包括: 1. 属性 - `power`:返回 `power` 的值,类型为`Tensor`。 2. 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 #### 指数变换映射(Exp) `Exp` 做如下变量替换:`Y = g(X)= exp(X)`。其接口包括: 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 #### 标量仿射变换映射(ScalarAffine) `ScalarAffine` 做如下变量替换:`Y = g(X) = scale * X + shift`。其接口包括: 1. 属性 - `scale`:返回`scale`的值,类型为`Tensor`。 - `shift`:返回`shift`的值,类型为`Tensor`。 2. 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 #### Softplus变换映射(Softplus) `Softplus` 做如下变量替换:`Y = g(X) = log(1 + e ^ {sharpness * X}) / sharpness`。其接口包括: 1. 属性 - `sharpness`:返回 `sharpness` 的值,类型为`Tensor`。 2. 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 #### 耿贝尔累计密度函数映射(GumbelCDF) `GumbelCDF` 做如下变量替换:$Y = g(X) = \exp(-\exp(-\frac{X - loc}{scale}))$。其接口包括: 1. 属性 - `loc`:返回`loc`的值,类型为`Tensor`。 - `scale`:返回`scale`的值,类型为`Tensor`。 2. 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 #### 逆映射(Invert) `Invert` 对一个映射做逆变换,其接口包括: 1. 属性 - `bijector`:返回初始化时使用的*Bijector*,类型为`Bijector`。 2. 映射函数 - `forward`:正向映射,输入为 `Tensor` 。 - `inverse`:反向映射,输入为 `Tensor` 。 - `forward_log_jacobian`:正向映射的导数的对数,输入为 `Tensor` 。 - `inverse_log_jacobian`:反向映射的导数的对数,输入为 `Tensor` 。 ### PyNative模式下调用Bijector实例 在执行之前,我们需要导入需要的库文件包。双射类最主要的库是 `mindspore.nn.probability.bijector`,导入后我们使用 `msb` 作为库的缩写并进行调用。 下面我们以 `PowerTransform` 为例。创建一个指数为2的 `PowerTransform` 对象。 ``` import numpy as np import mindspore.nn as nn import mindspore.nn.probability.bijector as msb import mindspore.context as context from mindspore import Tensor, dtype context.set_context(mode=context.PYNATIVE_MODE) powertransform = msb.PowerTransform(power=2.) x = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32) tx = Tensor(x, dtype=dtype.float32) forward = powertransform.forward(tx) inverse = powertransform.inverse(tx) forward_log_jaco = powertransform.forward_log_jacobian(tx) inverse_log_jaco = powertransform.inverse_log_jacobian(tx) print(powertransform) print("forward: ", forward) print("inverse: ", inverse) print("forward_log_jacobian: ", forward_log_jaco) print("inverse_log_jacobian: ", inverse_log_jaco) ``` ### 图模式下调用Bijector实例 在图模式下,`Bijector` 子类可用在网络中。 ``` import numpy as np import mindspore.nn as nn from mindspore import Tensor from mindspore import dtype as mstype import mindspore.context as context import mindspore.nn.probability.bijector as msb context.set_context(mode=context.GRAPH_MODE) class Net(nn.Cell): def __init__(self): super(Net, self).__init__() # create a PowerTransform bijector self.powertransform = msb.PowerTransform(power=2.) 
def construct(self, value): forward = self.powertransform.forward(value) inverse = self.powertransform.inverse(value) forward_log_jaco = self.powertransform.forward_log_jacobian(value) inverse_log_jaco = self.powertransform.inverse_log_jacobian(value) return forward, inverse, forward_log_jaco, inverse_log_jaco net = Net() x = np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32) tx = Tensor(x, dtype=mstype.float32) forward, inverse, forward_log_jaco, inverse_log_jaco = net(tx) print("forward: ", forward) print("inverse: ", inverse) print("forward_log_jacobian: ", forward_log_jaco) print("inverse_log_jacobian: ", inverse_log_jaco) ``` ## 深度概率网络 使用MindSpore深度概率编程库(`mindspore.nn.probability.dpn`)来构造变分自编码器(VAE)进行推理尤为简单。我们只需要自定义编码器和解码器(DNN模型),调用VAE或CVAE接口形成其派生网络,然后调用ELBO接口进行优化,最后使用SVI接口进行变分推理。这样做的好处是,不熟悉变分推理的用户可以像构建DNN模型一样来构建概率模型,而熟悉的用户可以调用这些接口来构建更为复杂的概率模型。VAE的接口在`mindspore.nn.probability.dpn`下面,dpn代表的是Deep probabilistic network,这里提供了一些基本的深度概率网络的接口,例如VAE。 ### VAE 首先,我们需要先自定义encoder和decoder,调用`mindspore.nn.probability.dpn.VAE`接口来构建VAE网络,我们除了传入encoder和decoder之外,还需要传入encoder输出变量的维度hidden size,以及VAE网络存储潜在变量的维度latent size,一般latent size会小于hidden size。 ``` import mindspore.nn as nn import mindspore.ops as ops from mindspore.nn.probability.dpn import VAE IMAGE_SHAPE = (-1, 1, 32, 32) class Encoder(nn.Cell): def __init__(self): super(Encoder, self).__init__() self.fc1 = nn.Dense(1024, 800) self.fc2 = nn.Dense(800, 400) self.relu = nn.ReLU() self.flatten = nn.Flatten() def construct(self, x): x = self.flatten(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.relu(x) return x class Decoder(nn.Cell): def __init__(self): super(Decoder, self).__init__() self.fc1 = nn.Dense(400, 1024) self.sigmoid = nn.Sigmoid() self.reshape = ops.Reshape() def construct(self, z): z = self.fc1(z) z = self.reshape(z, IMAGE_SHAPE) z = self.sigmoid(z) return z encoder = Encoder() decoder = Decoder() vae = VAE(encoder, decoder, hidden_size=400, latent_size=20) ``` ### ConditionalVAE 类似地,ConditionalVAE与VAE的使用方法比较相近,不同的是,ConditionalVAE利用了数据集的标签信息,属于有监督学习算法,其生成效果一般会比VAE好。 首先,先自定义encoder和decoder,并调用`mindspore.nn.probability.dpn.ConditionalVAE`接口来构建ConditionalVAE网络,这里的encoder和VAE的不同,因为需要传入数据集的标签信息;decoder和上述的一样。ConditionalVAE接口的传入则还需要传入数据集的标签类别个数,其余和VAE接口一样。 ``` import mindspore.nn as nn import mindspore.ops as ops from mindspore.nn.probability.dpn import ConditionalVAE IMAGE_SHAPE = (-1, 1, 32, 32) class Encoder(nn.Cell): def __init__(self, num_classes): super(Encoder, self).__init__() self.fc1 = nn.Dense(1024 + num_classes, 400) self.relu = nn.ReLU() self.flatten = nn.Flatten() self.concat = ops.Concat(axis=1) self.one_hot = nn.OneHot(depth=num_classes) def construct(self, x, y): x = self.flatten(x) y = self.one_hot(y) input_x = self.concat((x, y)) input_x = self.fc1(input_x) input_x = self.relu(input_x) return input_x class Decoder(nn.Cell): def __init__(self): super(Decoder, self).__init__() self.fc1 = nn.Dense(400, 1024) self.sigmoid = nn.Sigmoid() self.reshape = ops.Reshape() def construct(self, z): z = self.fc1(z) z = self.reshape(z, IMAGE_SHAPE) z = self.sigmoid(z) return z encoder = Encoder(num_classes=10) decoder = Decoder() cvae = ConditionalVAE(encoder, decoder, hidden_size=400, latent_size=20, num_classes=10) ``` 加载数据集,我们可以使用Mnist数据集,具体的数据加载和预处理过程可以参考这里[实现一个图片分类应用](https://www.mindspore.cn/tutorial/training/zh-CN/master/quick_start/quick_start.html),这里会用到create_dataset函数创建数据迭代器。 ``` import mindspore.dataset as ds from mindspore.common.initializer import Normal from mindspore.dataset.vision import Inter from 
mindspore import dtype as mstype import mindspore.dataset.vision.c_transforms as CV import mindspore.dataset.transforms.c_transforms as C def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1): """ create dataset for train or test Args: data_path: Data path batch_size: The number of data records in each group repeat_size: The number of replicated data records num_parallel_workers: The number of parallel workers """ # define dataset mnist_ds = ds.MnistDataset(data_path) # define operation parameters resize_height, resize_width = 32, 32 rescale = 1.0 / 255.0 shift = 0.0 rescale_nml = 1 / 0.3081 shift_nml = -1 * 0.1307 / 0.3081 # define map operations resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Resize images to (32, 32) rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images rescale_op = CV.Rescale(rescale, shift) # rescale images hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network. type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network # apply map operations on images mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers) mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers) mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers) mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers) mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers) # apply DatasetOps buffer_size = 10000 mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) mnist_ds = mnist_ds.repeat(repeat_size) return mnist_ds image_path = "./MNIST/train" ds_train = create_dataset(image_path, 128, 1) ``` 接下来,需要用到infer接口进行VAE网络的变分推断。 ## 概率推断算法 调用ELBO接口(`mindspore.nn.probability.infer.ELBO`)来定义VAE网络的损失函数,调用`WithLossCell`封装VAE网络和损失函数,并定义优化器,之后传入SVI接口(`mindspore.nn.probability.infer.SVI`)。SVI的`run`函数可理解为VAE网络的训练,可以指定训练的`epochs`,返回结果为训练好的网络;`get_train_loss`函数可以返回训练好后模型的loss。 ``` from mindspore.nn.probability.infer import ELBO, SVI net_loss = ELBO(latent_prior='Normal', output_prior='Normal') net_with_loss = nn.WithLossCell(vae, net_loss) optimizer = nn.Adam(params=vae.trainable_params(), learning_rate=0.001) vi = SVI(net_with_loss=net_with_loss, optimizer=optimizer) vae = vi.run(train_dataset=ds_train, epochs=10) trained_loss = vi.get_train_loss() ``` 最后,得到训练好的VAE网络后,我们可以使用`vae.generate_sample`生成新样本,需要传入待生成样本的个数,及生成样本的shape,shape需要保持和原数据集中的样本shape一样;当然,我们也可以使用`vae.reconstruct_sample`重构原来数据集中的样本,来测试VAE网络的重建能力。 ``` generated_sample = vae.generate_sample(64, IMAGE_SHAPE) for sample in ds_train.create_dict_iterator(): sample_x = Tensor(sample['image'], dtype=mstype.float32) reconstructed_sample = vae.reconstruct_sample(sample_x) print('The shape of the generated sample is ', generated_sample.shape) ``` ConditionalVAE训练过程和VAE的过程类似,但需要注意的是使用训练好的ConditionalVAE网络生成新样本和重建新样本时,需要输入标签信息,例如下面生成的新样本就是64个0-7的数字。 ``` sample_label = Tensor([i for i in range(0, 8)] * 8, dtype=mstype.int32) generated_sample = cvae.generate_sample(sample_label, 64, IMAGE_SHAPE) for sample in ds_train.create_dict_iterator(): sample_x = Tensor(sample['image'], dtype=mstype.float32) sample_y = Tensor(sample['label'], 
dtype=mstype.int32) reconstructed_sample = cvae.reconstruct_sample(sample_x, sample_y) print('The shape of the generated sample is ', generated_sample.shape) ``` 如果希望新生成的样本更好,更清晰,用户可以自己定义更复杂的encoder和decoder,这里的示例只用了两层全连接层,仅供示例的指导。 ## 贝叶斯层 下面的范例使用MindSpore的`nn.probability.bnn_layers`中的API实现BNN图片分类模型。MindSpore的`nn.probability.bnn_layers`中的API包括`NormalPrior`,`NormalPosterior`,`ConvReparam`,`DenseReparam`和`WithBNNLossCell`。BNN与DNN的最大区别在于,BNN层的weight和bias不再是确定的值,而是服从一个分布。其中,`NormalPrior`,`NormalPosterior`分别用来生成服从正态分布的先验分布和后验分布;`ConvReparam`和`DenseReparam`分别是使用reparameteration方法实现的贝叶斯卷积层和全连接层;`WithBNNLossCell`是用来封装BNN和损失函数的。 如何使用`nn.probability.bnn_layers`中的API构建贝叶斯神经网络并实现图片分类,可以参考教程[使用贝叶斯网络](https://www.mindspore.cn/tutorial/training/zh-CN/master/advanced_use/apply_deep_probability_programming.html#id3)。 ## 贝叶斯转换 对于不熟悉贝叶斯模型的研究人员,MDP提供了贝叶斯转换接口(`mindspore.nn.probability.transform`),支持DNN (Deep Neural Network)模型一键转换成BNN (Bayesian Neural Network)模型。 其中的模型转换API`TransformToBNN`的`__init__`函数定义如下: ``` class TransformToBNN: def __init__(self, trainable_dnn, dnn_factor=1, bnn_factor=1): net_with_loss = trainable_dnn.network self.optimizer = trainable_dnn.optimizer self.backbone = net_with_loss.backbone_network self.loss_fn = getattr(net_with_loss, "_loss_fn") self.dnn_factor = dnn_factor self.bnn_factor = bnn_factor self.bnn_loss_file = None ``` 参数`trainable_bnn`是经过`TrainOneStepCell`包装的可训练DNN模型,`dnn_factor`和`bnn_factor`分别为由损失函数计算得到的网络整体损失的系数和每个贝叶斯层的KL散度的系数。 API`TransformToBNN`主要实现了两个功能: - 功能一:转换整个模型 `transform_to_bnn_model`方法可以将整个DNN模型转换为BNN模型。其定义如下: ```python def transform_to_bnn_model(self, get_dense_args=lambda dp: {"in_channels": dp.in_channels, "has_bias": dp.has_bias, "out_channels": dp.out_channels, "activation": dp.activation}, get_conv_args=lambda dp: {"in_channels": dp.in_channels, "out_channels": dp.out_channels, "pad_mode": dp.pad_mode, "kernel_size": dp.kernel_size, "stride": dp.stride, "has_bias": dp.has_bias, "padding": dp.padding, "dilation": dp.dilation, "group": dp.group}, add_dense_args=None, add_conv_args=None): r""" Transform the whole DNN model to BNN model, and wrap BNN model by TrainOneStepCell. Args: get_dense_args (function): The arguments gotten from the DNN full connection layer. Default: lambda dp: {"in_channels": dp.in_channels, "out_channels": dp.out_channels, "has_bias": dp.has_bias}. get_conv_args (function): The arguments gotten from the DNN convolutional layer. Default: lambda dp: {"in_channels": dp.in_channels, "out_channels": dp.out_channels, "pad_mode": dp.pad_mode, "kernel_size": dp.kernel_size, "stride": dp.stride, "has_bias": dp.has_bias}. add_dense_args (dict): The new arguments added to BNN full connection layer. Default: {}. add_conv_args (dict): The new arguments added to BNN convolutional layer. Default: {}. Returns: Cell, a trainable BNN model wrapped by TrainOneStepCell. 
""" ``` 参数`get_dense_args`指定从DNN模型的全连接层中获取哪些参数,默认值是DNN模型的全连接层和BNN的全连接层所共有的参数,参数具体的含义可以参考[API说明文档](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/nn/mindspore.nn.Dense.html);`get_conv_args`指定从DNN模型的卷积层中获取哪些参数,默认值是DNN模型的卷积层和BNN的卷积层所共有的参数,参数具体的含义可以参考[API说明文档](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/nn/mindspore.nn.Conv2d.html);参数`add_dense_args`和`add_conv_args`分别指定了要为BNN层指定哪些新的参数值。需要注意的是,`add_dense_args`中的参数不能与`get_dense_args`重复,`add_conv_args`和`get_conv_args`也是如此。 - 功能二:转换指定类型的层 `transform_to_bnn_layer`方法可以将DNN模型中指定类型的层(`nn.Dense`或者`nn.Conv2d`)转换为对应的贝叶斯层。其定义如下: ```python def transform_to_bnn_layer(self, dnn_layer, bnn_layer, get_args=None, add_args=None): r""" Transform a specific type of layers in DNN model to corresponding BNN layer. Args: dnn_layer_type (Cell): The type of DNN layer to be transformed to BNN layer. The optional values are nn.Dense, nn.Conv2d. bnn_layer_type (Cell): The type of BNN layer to be transformed to. The optional values are DenseReparameterization, ConvReparameterization. get_args (dict): The arguments gotten from the DNN layer. Default: None. add_args (dict): The new arguments added to BNN layer. Default: None. Returns: Cell, a trainable model wrapped by TrainOneStepCell, whose sprcific type of layer is transformed to the corresponding bayesian layer. """ ``` 参数`dnn_layer`指定将哪个类型的DNN层转换成BNN层,`bnn_layer`指定DNN层将转换成哪个类型的BNN层,`get_args`和`add_args`分别指定从DNN层中获取哪些参数和要为BNN层的哪些参数重新赋值。 如何在MindSpore中使用API`TransformToBNN`可以参考教程[DNN一键转换成BNN](https://www.mindspore.cn/tutorial/training/zh-CN/master/advanced_use/apply_deep_probability_programming.html#dnnbnn) ## 贝叶斯工具箱 贝叶斯神经网络的优势之一就是可以获取不确定性,MDP在上层提供了不确定性估计的工具箱(`mindspore.nn.probability.toolbox`),用户可以很方便地使用该工具箱计算不确定性。不确定性意味着深度学习模型对预测结果的不确定程度。目前,大多数深度学习算法只能给出高置信度的预测结果,而不能判断预测结果的确定性,不确定性主要有两种类型:偶然不确定性和认知不确定性。 - 偶然不确定性(Aleatoric Uncertainty):描述数据中的内在噪声,即无法避免的误差,这个现象不能通过增加采样数据来削弱。 - 认知不确定性(Epistemic Uncertainty):模型自身对输入数据的估计可能因为训练不佳、训练数据不够等原因而不准确,可以通过增加训练数据等方式来缓解。 不确定性评估工具箱的接口如下: - `model`:待评估不确定性的已训练好的模型。 - `train_dataset`:用于训练的数据集,迭代器类型。 - `task_type`:模型的类型,字符串,输入“regression”或者“classification”。 - `num_classes`:如果是分类模型,需要指定类别的标签数量。 - `epochs`:用于训练不确定模型的迭代数。 - `epi_uncer_model_path`:用于存储或加载计算认知不确定性的模型的路径。 - `ale_uncer_model_path`:用于存储或加载计算偶然不确定性的模型的路径。 - `save_model`:布尔类型,是否需要存储模型。 在使用前,需要先训练好模型,以LeNet5为例,使用方式如下: ``` import mindspore.nn as nn from mindspore import Tensor from mindspore.nn.probability.toolbox.uncertainty_evaluation import UncertaintyEvaluation from mindspore import load_checkpoint, load_param_into_net class LeNet5(nn.Cell): """Lenet network structure.""" # define the operator required def __init__(self, num_class=10, num_channel=1): super(LeNet5, self).__init__() self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid') self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid') self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02)) self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02)) self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02)) self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) self.flatten = nn.Flatten() # use the preceding operators to construct networks def construct(self, x): x = self.max_pool2d(self.relu(self.conv1(x))) x = self.max_pool2d(self.relu(self.conv2(x))) x = self.flatten(x) x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) x = self.fc3(x) return x if __name__ == '__main__': # get trained model network = LeNet5() param_dict = load_checkpoint('checkpoint_lenet.ckpt') load_param_into_net(network, param_dict) # get 
train and eval dataset ds_train = create_dataset('MNIST/train') ds_eval = create_dataset('MNIST/test') evaluation = UncertaintyEvaluation(model=network, train_dataset=ds_train, task_type='classification', num_classes=10, epochs=1, epi_uncer_model_path=None, ale_uncer_model_path=None, save_model=False) for eval_data in ds_eval.create_dict_iterator(): eval_data = Tensor(eval_data['image'], mstype.float32) epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data) aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data) print('The shape of epistemic uncertainty is ', epistemic_uncertainty.shape) print('The shape of aleatoric uncertainty is ', aleatoric_uncertainty.shape) ``` `eval_epistemic_uncertainty`计算的是认知不确定性,也叫模型不确定性,对于每一个样本的每个预测标签都会有一个不确定值;`eval_aleatoric_uncertainty`计算的是偶然不确定性,也叫数据不确定性,对于每一个样本都会有一个不确定值。 uncertainty的值大于等于0,越大表示不确定性越高。
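As a compact illustration of the `TransformToBNN` interface quoted above, the sketch below wraps a small DNN in `TrainOneStepCell` and converts it in one call. The tiny network, the loss and optimizer choices, the `dnn_factor`/`bnn_factor` values, and the exact import path are illustrative assumptions rather than recommendations from the original documentation; only the constructor and the `transform_to_bnn_model()`/`transform_to_bnn_layer()` signatures are taken from the definitions shown above.

```
import mindspore.nn as nn
from mindspore.nn.probability import transform

class SimpleDNN(nn.Cell):
    """A deliberately tiny DNN used only to demonstrate the conversion."""
    def __init__(self):
        super(SimpleDNN, self).__init__()
        self.fc1 = nn.Dense(32, 16)
        self.relu = nn.ReLU()
        self.fc2 = nn.Dense(16, 10)

    def construct(self, x):
        return self.fc2(self.relu(self.fc1(x)))

# TransformToBNN expects a trainable model wrapped by TrainOneStepCell,
# whose inner network is a WithLossCell (backbone + loss function).
network = SimpleDNN()
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
optimizer = nn.Adam(params=network.trainable_params(), learning_rate=0.001)
net_with_loss = nn.WithLossCell(network, criterion)
train_net = nn.TrainOneStepCell(net_with_loss, optimizer)

# dnn_factor / bnn_factor weight the task loss and the per-layer KL terms;
# the values below are placeholders to be tuned for a real dataset.
bnn_transformer = transform.TransformToBNN(train_net, dnn_factor=60000, bnn_factor=0.000001)

# Option 1: convert every full-connection and convolutional layer in the model.
train_bnn_network = bnn_transformer.transform_to_bnn_model()

# Option 2: convert only one layer type, e.g. nn.Dense -> bnn_layers.DenseReparam
# (argument names follow the transform_to_bnn_layer definition quoted above).
# train_bnn_network = bnn_transformer.transform_to_bnn_layer(nn.Dense, bnn_layers.DenseReparam)
```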
# Machine Learning Engineer Nanodegree ## Reinforcement Learning ## Project: Train a Smartcab to Drive Welcome to the fourth project of the Machine Learning Engineer Nanodegree! In this notebook, template code has already been provided for you to aid in your analysis of the *Smartcab* and your implemented learning algorithm. You will not need to modify the included code beyond what is requested. There will be questions that you must answer which relate to the project and the visualizations provided in the notebook. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide in `agent.py`. >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. ----- ## Getting Started In this project, you will work towards constructing an optimized Q-Learning driving agent that will navigate a *Smartcab* through its environment towards a goal. Since the *Smartcab* is expected to drive passengers from one location to another, the driving agent will be evaluated on two very important metrics: **Safety** and **Reliability**. A driving agent that gets the *Smartcab* to its destination while running red lights or narrowly avoiding accidents would be considered **unsafe**. Similarly, a driving agent that frequently fails to reach the destination in time would be considered **unreliable**. Maximizing the driving agent's **safety** and **reliability** would ensure that *Smartcabs* have a permanent place in the transportation industry. **Safety** and **Reliability** are measured using a letter-grade system as follows: | Grade | Safety | Reliability | |:-----: |:------: |:-----------: | | A+ | Agent commits no traffic violations,<br/>and always chooses the correct action. | Agent reaches the destination in time<br />for 100% of trips. | | A | Agent commits few minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 90% of trips. | | B | Agent commits frequent minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 80% of trips. | | C | Agent commits at least one major traffic violation,<br/> such as driving through a red light. | Agent reaches the destination on time<br />for at least 70% of trips. | | D | Agent causes at least one minor accident,<br/> such as turning left on green with oncoming traffic. | Agent reaches the destination on time<br />for at least 60% of trips. | | F | Agent causes at least one major accident,<br />such as driving through a red light with cross-traffic. | Agent fails to reach the destination on time<br />for at least 60% of trips. | To assist evaluating these important metrics, you will need to load visualization code that will be used later on in the project. Run the code cell below to import this code which is required for your analysis. ``` # Import the visualization code import visuals as vs # Pretty display for notebooks %matplotlib inline ``` ### Understand the World Before starting to work on implementing your driving agent, it's necessary to first understand the world (environment) which the *Smartcab* and driving agent work in. 
One of the major components of building a self-learning agent is understanding the characteristics of the agent, which includes how the agent operates. To begin, simply run the `agent.py` agent code exactly as it is -- no need to make any additions whatsoever. Let the resulting simulation run for some time to see the various working components. Note that in the visual simulation (if enabled), the **white vehicle** is the *Smartcab*.

### Question 1
In a few sentences, describe what you observe during the simulation when running the default `agent.py` agent code. Some things you could consider:
- *Does the Smartcab move at all during the simulation?*
- *What kind of rewards is the driving agent receiving?*
- *How does the light changing color affect the rewards?*

**Hint:** From the `/smartcab/` top-level directory (where this notebook is located), run the command
```bash
'python smartcab/agent.py'
```

**Answer:**

- The Smartcab does not move during the simulation. By default, the Smartcab (labeled with the Udacity logo) remains stationary at its initial intersection.
- Random seeding is applied to the problem, i.e., the Smartcab is placed in a different starting position with each simulation; likewise, the starting positions and travel directions of the other cars are randomly seeded as well.
- Visual information is quite coarse, e.g., turn signals are not apparent on other cars (it is difficult to tell if oncoming traffic is making a left turn, in which case a Smartcab left turn would be allowable as well). Relative velocities are also hard to discern, e.g., when stopped at a red light and wishing to make a legal right turn, it's unclear how much time the Smartcab has before right-of-way traffic headed towards the intersection actually passes through it.
- Floating-point rewards (> 0) and penalties (< 0) are assigned to the Smartcab after each time step based on its previous action.
- Penalties (negative rewards) appear where expected, e.g., for idling at a green light with no oncoming traffic, as do rewards, e.g., for idling at a red light.
- Assigned rewards are not consistently valued, e.g., the following sequential rewards were observed: idling at a green light with no oncoming traffic was penalized (-5.83), but continuing to idle at the same green light lessened slightly (-5.59), and then worsened (-5.95). Idling at a green light with oncoming traffic was given a marginal reward (0.0 ~ 2.0). Idling at a red light provided a larger reward (1 ~ 3).

### Understand the Code
In addition to understanding the world, it is also necessary to understand the code itself that governs how the world, simulation, and so on operate. Attempting to create a driving agent would be difficult without having at least explored the *"hidden"* devices that make everything work. In the `/smartcab/` top-level directory, there are two folders: `/logs/` (which will be used later) and `/smartcab/`. Open the `/smartcab/` folder and explore each Python file included, then answer the following question.
### Question 2
- *In the *`agent.py`* Python file, choose three flags that can be set and explain how they change the simulation.*
- *In the *`environment.py`* Python file, what Environment class function is called when an agent performs an action?*
- *In the *`simulator.py`* Python file, what is the difference between the *`'render_text()'`* function and the *`'render()'`* function?*
- *In the *`planner.py`* Python file, will the *`'next_waypoint()`* function consider the North-South or East-West direction first?*

**Answer:**

In `agent.py`, several flags can be set for the driving environment, e.g.:
- `verbose`: _(boolean)_ setting this to True displays additional output from the simulation
- `num_dummies`: _(integer)_ number of dummy agents in the environment
- `grid_size`: _(integer)_ number of intersections (columns, rows)

In `environment.py`, the `act` function considers the action taken by the Smartcab and performs it if legal. In addition, it also assigns a reward based on local traffic laws.

In `simulator.py`, `render_text()` writes simulation updates into the console, while `render()` updates the GUI traffic map.

In `planner.py`, the `next_waypoint()` function considers the East-West direction of travel before North-South.

-----
## Implement a Basic Driving Agent

The first step to creating an optimized Q-Learning driving agent is getting the agent to actually take valid actions. In this case, a valid action is one of `None` (do nothing), `'Left'` (turn left), `'Right'` (turn right), or `'Forward'` (go forward). For your first implementation, navigate to the `'choose_action()'` agent function and make the driving agent randomly choose one of these actions. Note that you have access to several class variables that will help you write this functionality, such as `'self.learning'` and `'self.valid_actions'`. Once implemented, run the agent file and simulation briefly to confirm that your driving agent is taking a random action each time step.

### Basic Agent Simulation Results
To obtain results from the initial simulation, you will need to adjust the following flags:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file in `/logs/`.
- `'n_test'` - Set this to `'10'` to perform 10 testing trials.

Optionally, you may disable the visual simulation (which can make the trials go faster) by setting the `'display'` flag to `False`. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!

Once you have successfully completed the initial simulation (there should have been 20 training trials and 10 testing trials), run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!

Run the `agent.py` file after setting the flags from the `projects/smartcab` folder instead of `projects/smartcab/smartcab`.

```
# Load the 'sim_no-learning' log file from the initial simulation results
vs.plot_trials('sim_no-learning.csv')
```

### Question 3
Using the visualization above that was produced from your initial simulation, provide an analysis and make several observations about the driving agent.
Be sure that you are making at least one observation about each panel present in the visualization. Some things you could consider:
- *How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?*
- *Given that the agent is driving randomly, does the rate of reliability make sense?*
- *What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?*
- *As the number of trials increases, does the outcome of results change significantly?*
- *Would this Smartcab be considered safe and/or reliable for its passengers? Why or why not?*

**Answer:**

* *How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?*
    * On average, the agent is making bad decisions ~40% of the time.
    * On average, ~25% of these bad decisions cause accidents, i.e., ~10% of all decisions lead to accidents.
* *Given that the agent is driving randomly, does the rate of reliability make sense?*
    * Somewhat; it can certainly be justified. Given the actions (4 choices: None, L, R, F) and the environment (traffic lights and other traffic), bad behavior amounts to a bit of a coin flip. Let's rationalize this further:
        * Assume that the agent is equally likely (p = 0.5) to encounter a red or a green light.
        * For green lights, R and F are always valid actions. Additionally, when there is no oncoming traffic (assume 80% of the time), L is also valid.
        * For red lights, None is always a valid action, as is R with no oncoming traffic (assume 80% of the time).
        * Computing the probabilities of success, if we assume R, F, L, and None are all equally likely to occur, then for green lights we're reliable 65% of the time (unreliable 35%); for red lights, a reliability of 45% is obtained (55% unreliable).
        * Averaging the unreliable probabilities for both light situations (since either is equally likely), the unreliable rate is 45%. Quite close to the ~40% observed!
* *What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?*
    * On average, the rewards are negative, and close to -5, which suggests that it has been heavily penalized.
* *As the number of trials increases, does the outcome of results change significantly?*
    * Several observations can be made:
        * _Frequency of bad actions_, _exploration factor_, and _learning factor_ remain consistent over all trials.
        * Although there is some scatter in _average reward per action_ and _rate of reliability_, rerunning this simulation multiple times demonstrated no correlation with the trial number.
* *Would this Smartcab be considered safe and/or reliable for its passengers? Why or why not?*
    * In no universe would this Smartcab be considered safe or reliable -- unless it's competing in a destruction derby, then yes. Actions have a 40% chance of being violations or accidents - incredibly dangerous! Worse yet, the agent never achieves a reliability score above 20% -- abysmal. You'd likely accomplish similar performance while driving drunk, blindfolded, with earplugs, covered in ants.

-----
## Inform the Driving Agent
The second step to creating an optimized Q-learning driving agent is defining a set of states that the agent can occupy in the environment. Depending on the input, sensory data, and additional variables available to the driving agent, a set of states can be defined for the agent so that it can eventually *learn* what action it should take when occupying a state.
The condition of `'if state then action'` for each state is called a **policy**, and is ultimately what the driving agent is expected to learn. Without defining states, the driving agent would never understand which action is most optimal -- or even what environmental variables and conditions it cares about!

### Identify States
Inspecting the `'build_state()'` agent function shows that the driving agent is given the following data from the environment:
- `'waypoint'`, which is the direction the *Smartcab* should drive leading to the destination, relative to the *Smartcab*'s heading.
- `'inputs'`, which is the sensor data from the *Smartcab*. It includes
  - `'light'`, the color of the light.
  - `'left'`, the intended direction of travel for a vehicle to the *Smartcab*'s left. Returns `None` if no vehicle is present.
  - `'right'`, the intended direction of travel for a vehicle to the *Smartcab*'s right. Returns `None` if no vehicle is present.
  - `'oncoming'`, the intended direction of travel for a vehicle across the intersection from the *Smartcab*. Returns `None` if no vehicle is present.
- `'deadline'`, which is the number of actions remaining for the *Smartcab* to reach the destination before running out of time.

### Question 4
*Which features available to the agent are most relevant for learning both **safety** and **efficiency**? Why are these features appropriate for modeling the *Smartcab* in the environment? If you did not choose some features, why are those features* not *appropriate?*

**Answer:**

Tricky question - it asks _which features_ (implying multiple features) _are the most relevant_ (implying a single prominent feature).

First, let's begin with a discussion of the features (the assumed grouping of `'waypoint'`, `'inputs'`, and `'deadline'`):
* `'inputs'` is the only feature that prescribes which maneuvers can safely be performed.
* `'waypoint'` is the only feature that can direct the Smartcab along the correct direction of travel. Not knowing the final destination turns the policy into a random walk, where we hope to (randomly) arrive at the destination.
* `'deadline'` can be used to adjust the policy, i.e., if time is not a consideration, the car can select safer actions, but if the deadline is of concern, it can be much more aggressive.

The features most relevant to safety and efficiency therefore appear to be **`'inputs'`**, for safety, and **`'waypoint'`**, for efficiency. A more detailed discussion of how these features are reduced to a compact state space follows in the answer to **Question 5**.

### Define a State Space
When defining a set of states that the agent can occupy, it is necessary to consider the *size* of the state space. That is to say, if you expect the driving agent to learn a **policy** for each state, you would need to have an optimal action for *every* state the agent can occupy. If the number of all possible states is very large, it might be the case that the driving agent never learns what to do in some states, which can lead to uninformed decisions. For example, consider a case where the following features are used to define the state of the *Smartcab*:

`('is_raining', 'is_foggy', 'is_red_light', 'turn_left', 'no_traffic', 'previous_turn_left', 'time_of_day')`.

How frequently would the agent occupy a state like `(False, True, True, True, False, False, '3AM')`? Without a near-infinite amount of time for training, it's doubtful the agent would ever learn the proper action!

### Question 5
*If a state is defined using the features you've selected from **Question 4**, what would be the size of the state space?
Given what you know about the environment and how it is simulated, do you think the driving agent could learn a policy for each possible state within a reasonable number of training trials?*

**Hint:** Consider the *combinations* of features to calculate the total number of states!

**Answer:**

If `'inputs'` and `'waypoint'` are used to direct the smartcab, there are the following states:
* (2 states) `'inputs'` `'light'`: red (stop), green (go)
* (0 states) `'inputs'` `'right'`: not included in the state
* (2 states) `'inputs'` `'left'`: forward (True/False)
* (3 states) `'inputs'` `'oncoming'`: None, forward, right
* (3 states) `'waypoint'`: forward, left, right

Talking through our minimum state set:
* Light states (green/red) are mandatory. Additionally, based upon their state, we can apply common right-of-way rules to reduce the number of other states.
* Vehicles travelling from the right are helpful but not necessary. As the smartcab moves through an intersection, regardless of its direction (forward, left, right), it does not need to know which vehicles are travelling from East to West, i.e., `'right'` vehicles.
* Vehicles travelling from the left are helpful, but it is only necessary to know if they are going forward. Those that are turning right will never collide (when obeying traffic rules). Those that are turning left can only do so when they have a green light, which implies our smartcab has a red light.
* Oncoming vehicles provide more information than right or left vehicles, but not every state is necessary. The smartcab requires knowledge of the oncoming vehicle's action, except for left turns, since an oncoming vehicle undertaking a left turn would be forced to yield to the smartcab.
* Waypoints are mandatory. Without them, it'd be a random walk to get to the destination.

The product of the state counts (2, 2, 3, 3) is equal to the total number of combinations, which is 36. Had the number of states not been pared back (2, 4, 4, 4, 3), we would have had nearly an order of magnitude more (384)!

### Update the Driving Agent State
For your second implementation, navigate to the `'build_state()'` agent function. With the justification you've provided in **Question 4**, you will now set the `'state'` variable to a tuple of all the features necessary for Q-Learning. Confirm your driving agent is updating its state by running the agent file and simulation briefly and note whether the state is displaying. If the visual simulation is used, confirm that the updated state corresponds with what is seen in the simulation.

**Note:** Remember to reset simulation flags to their default setting when making this observation!

-----
## Implement a Q-Learning Driving Agent
The third step to creating an optimized Q-Learning agent is to begin implementing the functionality of Q-Learning itself. The concept of Q-Learning is fairly straightforward: for every state the agent visits, create an entry in the Q-table for all state-action pairs available. Then, when the agent encounters a state and performs an action, update the Q-value associated with that state-action pair based on the reward received and the iterative update rule implemented. Of course, additional benefits come from Q-Learning, such that we can have the agent choose the *best* action for each state based on the Q-values of each state-action pair possible.

For this project, you will be implementing a *decaying,* $\epsilon$*-greedy* Q-learning algorithm with *no* discount factor. Follow the implementation instructions under each **TODO** in the agent functions.
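To make these pieces concrete before working through the TODOs, the cell below gives a minimal, self-contained sketch of a decaying $\epsilon$-greedy Q-learner with no discount factor. The class and method names are purely illustrative -- they are not the functions or signatures defined in `agent.py` -- and the linear decay step of 0.05 simply mirrors the default decay function described below.

```
import random

VALID_ACTIONS = [None, 'forward', 'left', 'right']  # illustrative action set

class QLearnerSketch(object):
    """Toy decaying epsilon-greedy Q-learner (no discount factor)."""

    def __init__(self, epsilon=1.0, alpha=0.5):
        self.Q = {}              # state -> {action: Q-value}
        self.epsilon = epsilon   # exploration factor
        self.alpha = alpha       # learning rate

    def create_Q(self, state):
        # Create entries for every state-action pair the first time a state is seen.
        if state not in self.Q:
            self.Q[state] = {action: 0.0 for action in VALID_ACTIONS}

    def choose_action(self, state):
        # With probability epsilon explore randomly; otherwise exploit the
        # highest-valued action, breaking ties at random.
        if random.random() < self.epsilon:
            return random.choice(VALID_ACTIONS)
        best = max(self.Q[state].values())
        return random.choice([a for a, q in self.Q[state].items() if q == best])

    def learn(self, state, action, reward):
        # With no discount factor, the Q-Learning update reduces to blending in
        # only the immediate reward: Q(s, a) <- Q(s, a) + alpha * (r - Q(s, a)).
        self.Q[state][action] += self.alpha * (reward - self.Q[state][action])

    def end_of_trial(self):
        # Linear decay, matching the default decay function used below.
        self.epsilon -= 0.05

# Typical per-step usage:
#   learner.create_Q(state)
#   action = learner.choose_action(state)
#   ...take the action and observe the reward...
#   learner.learn(state, action, reward)
```

The key ideas are the dictionary-of-dictionaries Q-table, the random-versus-greedy split controlled by $\epsilon$, and the update $Q(s,a) \leftarrow Q(s,a) + \alpha \, (r - Q(s,a))$, which is what the general Q-Learning update reduces to when future rewards are ignored.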
Note that the agent attribute `self.Q` is a dictionary: This is how the Q-table will be formed. Each state will be a key of the `self.Q` dictionary, and each value will then be another dictionary that holds the *action* and *Q-value*. Here is an example:

```
{ 'state-1': {
    'action-1' : Qvalue-1,
    'action-2' : Qvalue-2,
     ...},
  'state-2': {
    'action-1' : Qvalue-1,
     ...},
   ...}
```

Furthermore, note that you are expected to use a *decaying* $\epsilon$ *(exploration) factor*. Hence, as the number of trials increases, $\epsilon$ should decrease towards 0. This is because the agent is expected to learn from its behavior and begin acting on its learned behavior. Additionally, the agent will be tested on what it has learned after $\epsilon$ has passed a certain threshold (the default threshold is 0.01). For the initial Q-Learning implementation, you will be implementing a linear decaying function for $\epsilon$.

### Q-Learning Simulation Results
To obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
- `'n_test'` - Set this to `'10'` to perform 10 testing trials.
- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.

In addition, use the following decay function for $\epsilon$:

$$ \epsilon_{t+1} = \epsilon_{t} - 0.05, \hspace{10px}\textrm{for trial number } t$$

If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!

Once you have successfully completed the initial Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!

```
# Load the 'sim_default-learning' file from the default Q-Learning simulation
vs.plot_trials('sim_default-learning_saved.csv')
```

### Question 6
Using the visualization above that was produced from your default Q-Learning simulation, provide an analysis and make observations about the driving agent like in **Question 3**. Note that the simulation should have also produced the Q-table in a text file which can help you make observations about the agent's learning. Some additional things you could consider:
- *Are there any observations that are similar between the basic driving agent and the default Q-Learning agent?*
- *Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?*
- *Is the decaying function you implemented for $\epsilon$ (the exploration factor) accurately represented in the parameters panel?*
- *As the number of training trials increased, did the number of bad actions decrease?
Did the average reward increase?*
- *How does the safety and reliability rating compare to the initial driving agent?*

**Answer:**

* *Are there any observations that are similar between the basic driving agent and the default Q-Learning agent?*
    * Overall, although improvements are visible for the default Q-Learning agent, the safety and reliability of the basic driving agent (no learning) and the default Q-Learning agent are commensurate; both learners received failing grades for safety and reliability.
    * The rolling rate of reliability looks similar between agents.
    * The rolling (average) reward per action is _roughly_ similar between agents. There is a slight improvement towards the 19th and 20th trials, but it may be the result of a stochastic process rather than a true learning improvement.
    * Learning factors are constant (0.5) for both agents.
* *Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?*
    * Twenty training trials occurred, which given the epsilon parameters (initial value of 1.0, step size of -0.05) is expected, since 1.0/0.05 = 20.
* *Is the decaying function you implemented for $\epsilon$ (the exploration factor) accurately represented in the parameters panel?*
    * Yes, a simple linear function with a slope of -0.05 is shown.
* *As the number of training trials increased, did the number of bad actions decrease? Did the average reward increase?*
    * In a favorable manner, the number of bad actions decreased with the number of training trials, by roughly 30%.
    * Toward the end of training, the average rolling reward increased by a small margin.
* *How does the safety and reliability rating compare to the initial driving agent?*
    * As mentioned earlier, unfortunately, the Q-Learning agent scored as poorly as the basic agent.

-----
## Improve the Q-Learning Driving Agent
The third step to creating an optimized Q-Learning agent is to perform the optimization! Now that the Q-Learning algorithm is implemented and the driving agent is successfully learning, it's necessary to tune settings and adjust learning parameters so the driving agent learns both **safety** and **efficiency**. Typically this step will require a lot of trial and error, as some settings will invariably make the learning worse. One thing to keep in mind is the act of learning itself and the time that this takes: in theory, we could allow the agent to learn for an incredibly long amount of time; however, another goal of Q-Learning is to *transition from experimenting with unlearned behavior to acting on learned behavior*. For example, always allowing the agent to perform a random action during training (if $\epsilon = 1$ and never decays) will certainly make it *learn*, but never let it *act*. When improving on your Q-Learning implementation, consider the implications it creates and whether it is logistically sensible to make a particular adjustment.

### Improved Q-Learning Simulation Results
To obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.
- `'optimized'` - Set this to `'True'` to tell the driving agent you are performing an optimized version of the Q-Learning implementation.

Additional flags that can be adjusted as part of optimizing the Q-Learning agent:
- `'n_test'` - Set this to some positive number (previously 10) to perform that many testing trials.
- `'alpha'` - Set this to a real number between 0 - 1 to adjust the learning rate of the Q-Learning algorithm.
- `'epsilon'` - Set this to a real number between 0 - 1 to adjust the starting exploration factor of the Q-Learning algorithm.
- `'tolerance'` - Set this to some small value larger than 0 (default was 0.05) to set the epsilon threshold for testing.

Furthermore, use a decaying function of your choice for $\epsilon$ (the exploration factor). Note that whichever function you use, it **must decay to **`'tolerance'`** at a reasonable rate**. The Q-Learning agent will not begin testing until this occurs. Some example decaying functions (for $t$, the number of trials):

$$ \epsilon = a^t, \textrm{for } 0 < a < 1 \hspace{50px}\epsilon = \frac{1}{t^2}\hspace{50px}\epsilon = e^{-at}, \textrm{for } 0 < a < 1 \hspace{50px} \epsilon = \cos(at), \textrm{for } 0 < a < 1$$

You may also use a decaying function for $\alpha$ (the learning rate) if you so choose, however this is typically less common. If you do so, be sure that it adheres to the inequality $0 \leq \alpha \leq 1$. If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!

Once you have successfully completed the improved Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!

```
# Load the 'sim_improved-learning' file from the improved Q-Learning simulation
vs.plot_trials('sim_improved-learning_saved.csv')
```

### Question 7
Using the visualization above that was produced from your improved Q-Learning simulation, provide a final analysis and make observations about the improved driving agent like in **Question 6**. Questions you should answer:
- *What decaying function was used for epsilon (the exploration factor)?*
- *Approximately how many training trials were needed for your agent before beginning testing?*
- *What epsilon-tolerance and alpha (learning rate) did you use? Why did you use them?*
- *How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?*
- *Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy?*
- *Are you satisfied with the safety and reliability ratings of the *Smartcab*?*

**Answer:**

Note: the answers below refer to a 'frozen' implementation, which seemed to work reasonably well and return consistent results. Several epsilon and alpha functions were considered, e.g., constant, linearly decreasing, quadratically decreasing, exponentially decreasing.
Although higher reliability ratings were occasionally found, the results were not strongly repeatable, or the (seemingly beneficial) tactics employed did not have a true impact, e.g., lowering the test trial count led to higher variability of the test output, which sometimes resulted in a better grade.

- *What decaying function was used for epsilon (the exploration factor)? What epsilon-tolerance and alpha (learning rate) did you use? Why did you use them?*
    * For epsilon, the following function was used:
    $$ \epsilon = \epsilon_0 - 1 + e^{(-a t)} $$
    where
    $$ \epsilon_0 = 1.0, \quad a = 1 \times 10^{-2}, \quad \Delta t = 0.0005 $$
    * For alpha, a comparable function was used:
    $$ \alpha = \alpha_0 - 1 + e^{(-a t)} $$
    where
    $$ \alpha_0 = 0.8, \quad a = 1 \times 10^{-3}, \quad \Delta t = 0.0005 $$
- *Approximately how many training trials were needed for your agent before beginning testing?* About 600 trials.
- *How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?*
    * The safety rating has improved drastically, from failing (F) to excellent (A+).
    * Reliability has also improved significantly.
- *Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy? Are you satisfied with the safety and reliability ratings of the *Smartcab*?*
    * Q-Learning learned a policy, and based on the reward system, has favored safety over reliability.
    * Since the negative rewards from a safety violation are much more significant than the positive reliability-based rewards (from correctly delivering a passenger to their destination), during the learning process the smartcab first makes choices that do not result in safety violations, and second, attempts to obtain reliability-based rewards. When the number of trials was reduced, the smartcab was able to ace the safety score but blow the reliability score. This behavior was consistent for the various epsilon and alpha decay functions selected.

### Define an Optimal Policy
Sometimes, the answer to the important question *"what am I trying to get my agent to learn?"* only has a theoretical answer and cannot be concretely described. Here, however, you can concretely define what it is the agent is trying to learn, and that is the U.S. right-of-way traffic laws. Since these laws are known information, you can further define, for each state the *Smartcab* is occupying, the optimal action for the driving agent based on these laws. In that case, we call the set of optimal state-action pairs an **optimal policy**. Hence, unlike some theoretical answers, it is clear whether the agent is acting "incorrectly" not only by the reward (penalty) it receives, but also by pure observation. If the agent drives through a red light, we both see it receive a negative reward but also know that it is not the correct behavior. This can be used to your advantage for verifying whether the **policy** your driving agent has learned is the correct one, or if it is a **suboptimal policy**.

### Question 8
Provide a few examples (using the states you've defined) of what an optimal policy for this problem would look like. Afterwards, investigate the `'sim_improved-learning.txt'` text file to see the results of your improved Q-Learning algorithm. _For each state that has been recorded from the simulation, is the **policy** (the action with the highest value) correct for the given state?
Are there any states where the policy is different than what would be expected from an optimal policy?_ Provide an example of a state and all state-action rewards recorded, and explain why it is the correct policy.

**Answer:**

Reviewing the 36 states identified earlier (the agent was able to exhaustively visit every one of them), the overwhelming majority of the learned policies were optimal. However, in addition to the optimal policies, some suboptimal policies are highlighted below.

Recall the states defined earlier:
* (2 states) `'inputs'` `'light'`: red (stop), green (go)
* (2 states) `'inputs'` `'left'`: forward (True/False)
* (3 states) `'inputs'` `'oncoming'`: None, forward, right
* (3 states) `'waypoint'`: forward, left, right

(1) Consider the following input scenario: ('red', False, None, 'right'), which leads to the following policy ratings:
* right: optimal
* None: suboptimal
* left: incorrect
* forward: incorrect

According to the smartcab learning log, 'forward' = -11.23, **None = 2.06**, 'right' = 1.16, 'left' = -10.06, which is **suboptimal**. While suboptimal, it's a safe maneuver that still allowed the smartcab to obtain an ideal reliability rating during testing. Had the negative rewards accumulated faster (or been of larger magnitude), it is likely that 'right' would have secured the greatest Q-value through additional testing rounds.

(2) Consider a small variant of the previous input scenario: ('red', False, 'forward', 'right'), which leads to the following policy ratings:
* right: optimal
* None: suboptimal
* left: incorrect
* forward: incorrect

According to the smartcab log, 'forward' : -30.18, **'None' : 2.23**, 'right' : 0.99, 'left' : -37.58, which again is **suboptimal**, and the previous commentary still appears to be relevant.

(3) Consider the following input scenario: ('red', False, None, 'forward'), which leads to the following policy ratings:
* None: optimal
* right: suboptimal
* left: incorrect
* forward: incorrect

According to the smartcab learning log, 'forward' = -16.83, **None = 1.69**, 'right' = 0.48, 'left' = -13.75, which is optimal!

In all cases, it's interesting to note that the smartcab does incredibly well in identifying the incorrect actions. This is to be expected, as the negative rewards received for safety violations far outweigh the positive rewards received for taking the reliable, waypoint-following action.

-----
### Optional: Future Rewards - Discount Factor, `'gamma'`
Curiously, as part of the Q-Learning algorithm, you were asked to **not** use the discount factor, `'gamma'`, in the implementation. Including future rewards in the algorithm is used to aid in propagating positive rewards backwards from a future state to the current state. Essentially, if the driving agent is given the option to make several actions to arrive at different states, including future rewards will bias the agent towards states that could provide even more rewards. An example of this would be the driving agent moving towards a goal: with all actions and rewards equal, moving towards the goal would theoretically yield better rewards if there is an additional reward for reaching the goal. However, even though in this project the driving agent is trying to reach a destination in the allotted time, including future rewards will not benefit the agent. In fact, if the agent were given many trials to learn, it could negatively affect Q-values!

### Optional Question 9
*There are two characteristics about the project that invalidate the use of future rewards in the Q-Learning algorithm.
One characteristic has to do with the *Smartcab* itself, and the other has to do with the environment. Can you figure out what they are and why future rewards won't work for this project?* **Answer:** > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
# Use scikit-learn to predict diabetes progression with Watson Machine Learning REST API
This notebook contains steps and code to demonstrate support of external machine learning models in the Watson Machine Learning Service. It introduces `cURL` calls for publishing, deploying (as a Web Service) and scoring a scikit-learn ML model. The notebook uses cURL examples, so some familiarity with cURL is helpful.

This example is based on the `diabetes` dataset and a trained scikit-learn model, which can be found in the same repository under:
- `/data/diabetes/`
- `/models/scikit/diabetes/`

## Learning goals
The learning goals of this notebook are:
- Working with the Watson Machine Learning repository, deployment and scoring.

## Contents
This notebook contains the following parts:
1. [Setup](#setup)
2. [WML Repository](#wml_repository)
3. [Model Deployment and Scoring](#deploy_and_score)
4. [Persist new version of the model](#update_model)
5. [Redeploy and score new version of the model](#redeploy)
6. [Cleaning](#cleaning)
7. [Summary](#summary)

<a id="setup"></a>
## 1. Set up the environment
Before you use the sample code in this notebook, you must perform the following setup tasks:

- Create a <a href="https://console.ng.bluemix.net/catalog/services/ibm-watson-machine-learning/" target="_blank" rel="noopener no referrer">Watson Machine Learning (WML) Service</a> instance (a free plan is offered and information about how to create the instance can be found <a href="https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-service-instance.html?context=analytics" target="_blank" rel="noopener no referrer">here</a>).

You can find your COS credentials in the COS instance dashboard under the **Service credentials** tab. Go to the **Endpoint** tab in the COS instance's dashboard to get the endpoint information.

Authenticate the Watson Machine Learning service on IBM Cloud. Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. Give your key a name and click **Create**, then copy the created key and paste it below.

**NOTE:** You can also get a service-specific API key by going to the [**Service IDs** section of the Cloud Console](https://cloud.ibm.com/iam/serviceids). From that page, click **Create**, then copy the created key and paste it below.

```
%env API_KEY=...
%env WML_ENDPOINT_URL=...
%env WML_INSTANCE_CRN="fill out only if you want to create a new space"
%env WML_INSTANCE_NAME=...
%env COS_CRN="fill out only if you want to create a new space"
%env COS_ENDPOINT=...
%env COS_BUCKET=...
%env COS_ACCESS_KEY_ID=...
%env COS_SECRET_ACCESS_KEY=...
%env COS_API_KEY=...
%env SPACE_ID="fill out only if you have space already created" %env DATAPLATFORM_URL=https://api.dataplatform.cloud.ibm.com %env AUTH_ENDPOINT=https://iam.cloud.ibm.com/oidc/token ``` <a id="wml_token"></a> ### Getting WML authorization token for further cURL calls <a href="https://cloud.ibm.com/docs/cloud-object-storage?topic=cloud-object-storage-curl#curl-token" target="_blank" rel="noopener no referrer">Example of cURL call to get WML token</a> ``` %%bash --out token curl -sk -X POST \ --header "Content-Type: application/x-www-form-urlencoded" \ --header "Accept: application/json" \ --data-urlencode "grant_type=urn:ibm:params:oauth:grant-type:apikey" \ --data-urlencode "apikey=$API_KEY" \ "$AUTH_ENDPOINT" \ | cut -d '"' -f 4 %env TOKEN=$token ``` <a id="space_creation"></a> ### Space creation **Tip:** If you do not have `space` already created, please convert below three cells to `code` and run them. First of all, you need to create a `space` that will be used in all of your further cURL calls. If you do not have `space` already created, below is the cURL call to create one. <a href="https://cpd-spaces-api.eu-gb.cf.appdomain.cloud/#/Spaces/spaces_create" target="_blank" rel="noopener no referrer">Space creation</a> Space creation is asynchronous. This means that you need to check space creation status after creation call. Make sure that your newly created space is `active`. <a href="https://cpd-spaces-api.eu-gb.cf.appdomain.cloud/#/Spaces/spaces_get" target="_blank" rel="noopener no referrer">Get space information</a> --- <a id="wml_repository"></a> ## 2. Manage WML Repository In this section you will learn how to upload your ML model, list your models stored in repository, delete your model and update it. <a id="model_storing"></a> ### Model storing Store information about your model to WML repository. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_create" target="_blank" rel="noopener no referrer">Model storing</a> ``` %%bash --out model_payload MODEL_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "scikit_diabetes_model","description": "This is description","type": "scikit-learn_0.23", "software_spec": {"name": "default_py3.7"}}' echo $MODEL_PAYLOAD | python -m json.tool %env MODEL_PAYLOAD=$model_payload %%bash --out model_id -s "$model_payload" curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$MODEL_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models?version=2020-08-01" | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 2p %env MODEL_ID=$model_id ``` ### Create model revision for further update <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_create_revision" target="_blank" rel="noopener no referrer">Model revision</a> ``` %%bash --out revision_payload REVISION_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "commit_message": "Initial model."}' echo $REVISION_PAYLOAD | python -m json.tool %env REVISION_PAYLOAD=$revision_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REVISION_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?version=2020-08-01" \ | python -m json.tool ``` <a id="content_upload"></a> ### Model content upload Now you need to upload your model content into the WML repository. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_upload_content" target="_blank" rel="noopener no referrer">Upload model content</a> ``` !wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/scikit/diabetes/model/diabetes_model.tar.gz \ -O diabetes_model.tar.gz %%bash --out attachment_id curl -sk -X PUT \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/gzip" \ --header "Accept: application/json" \ --data-binary "@diabetes_model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/content?space_id=$SPACE_ID&version=2020-08-01&content_format=native" \ | grep "attachment_id" | awk -F '"' '{ print $4 }' %env ATTACHMENT_ID=$attachment_id ``` <a id="model_download"></a> ### Download model If you want to download your saved model, please make the following call. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_filtered_download" target="_blank" rel="noopener no referrer">Download model content</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --output "model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/download?space_id=$SPACE_ID&version=2020-08-01" !ls -l model.tar.gz ``` <a id="deploy_and_score"></a> ## 3. Deploy and Score In this section you will learn how to deploy and score pipeline model as webservice using WML instance. <a id="deployment_creation"></a> ### Deployment creation This example uses scikit-learn model deployment and `S` hardware specification. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_create" target="_blank" rel="noopener no referrer">Create deployment</a> ``` %%bash --out deployment_payload DEPLOYMENT_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "Diabetes deployment", "description": "This is description","online": {},"hardware_spec": {"name": "S"},"asset": {"id": "'"$MODEL_ID"'"}}' echo $DEPLOYMENT_PAYLOAD | python -m json.tool %env DEPLOYMENT_PAYLOAD=$deployment_payload %%bash --out deployment_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$DEPLOYMENT_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments?version=2020-08-01" | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 3p %env DEPLOYMENT_ID=$deployment_id ``` <a id="deployment_details"></a> ### Get deployment details As deployment API is asynchronous, please make sure your deployment is in `ready` state before going to the next points. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_get" target="_blank" rel="noopener no referrer">Get deployment details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="webservice_score"></a> ### Scoring of a webservice If you want to make a `score` call on your deployment, please follow a below method: <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_compute_predictions" target="_blank" rel="noopener no referrer">Score your deployment</a> ``` %%bash --out scoring_payload SCORING_PAYLOAD='{"space_id": "$SPACE_ID","input_data": [{"fields": ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"], "values": [[-0.00188201652779104, -0.044641636506989, -0.0514740612388061, -0.0263278347173518, -0.00844872411121698, -0.019163339748222, 0.0744115640787594, -0.0394933828740919, -0.0683297436244215, -0.09220404962683], [0.0852989062966783, 0.0506801187398187, 0.0444512133365941, -0.00567061055493425, -0.0455994512826475, -0.0341944659141195, -0.0323559322397657, -0.00259226199818282, 0.00286377051894013, -0.0259303389894746]]}]}' echo $SCORING_PAYLOAD | python -m json.tool %env SCORING_PAYLOAD=$scoring_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$SCORING_PAYLOAD"\ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID/predictions?version=2020-08-01" \ | python -m json.tool ``` <a id="deployments_list"></a> ### Listing all deployments <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_list" target="_blank" rel="noopener no referrer">List deployments details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="update_model"></a> ## 4. Persist new version of the model In this section, you'll learn how to store new version of your model in Watson Machine Learning repository. ### Model update Below you can find how ML model can be updated with new version on WML repository. List model revisions. 
<a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_list_revisions" target="_blank" rel="noopener no referrer">List revisions</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` ### Create second model revision <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_create_revision" target="_blank" rel="noopener no referrer">Create model revision</a> ``` %%bash --out revision_payload REVISION_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "commit_message": "Updated model."}' echo $REVISION_PAYLOAD | python -m json.tool %env REVISION_PAYLOAD=$revision_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REVISION_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?version=2020-08-01" \ | python -m json.tool ``` ### Update model metadata For example update model name or description <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_update" target="_blank" rel="noopener no referrer">Patch model</a> ``` %%bash --out update_payload echo '[{"op": "add", "path": "/name", "value": "updated scikit model"}]' | python -m json.tool %env UPDATE_PAYLOAD=$update_payload %%bash curl -sk -X PATCH \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$UPDATE_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` ### Upload new model content ``` !wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/scikit/diabetes/model/new_diabetes_model.tar.gz \ -O new_diabetes_model.tar.gz ``` <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_upload_content" target="_blank" rel="noopener no referrer">Upload model content</a> ``` %%bash curl -sk -X PUT \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/gzip" \ --header "Accept: application/json" \ --data-binary "@new_diabetes_model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/content?space_id=$SPACE_ID&version=2020-08-01&content_format=native" \ | python -m json.tool ``` #### List model revisions to see a new one just created <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_list_revisions" target="_blank" rel="noopener no referrer">List revisions</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` Now we have updated model content and model name in repository. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_get" target="_blank" rel="noopener no referrer">Get model details</a> ``` %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` <a id="redeploy"></a> ## 5. 
Redeploy and score new version of the model Below you can see how deployment can be updated with new version of the model without any change for scoring url. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Deployments/deployments_update" target="_blank" rel="noopener no referrer">Deployment update</a> ``` %%bash --out redeploy_payload echo '[{"op": "replace", "path": "/asset", "value": {"id": "'"$MODEL_ID"'"}}]' \ | python -m json.tool %env REDEPLOY_PAYLOAD=$redeploy_payload %%bash curl -sk -X PATCH \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REDEPLOY_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool ``` ### Score updated webservice ``` %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$SCORING_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID/predictions?version=2020-08-01" \ | python -m json.tool ``` <a id="cleaning"></a> ## 6. Cleaning section Below section is useful when you want to clean all of your previous work within this notebook. Just convert below cells into the `code` and run them. <a id="deployment_delete"></a> ### Deleting deployment **Tip:** You can delete existing deployment by calling DELETE method. <a id="model_delete"></a> ### Delete model from repository **Tip:** If you want to completely remove your stored model and model metadata, just use a DELETE method. <a href="https://watson-ml-v4-api.mybluemix.net/wml-restapi-cloud.html#/Models/models_delete" target="_blank" rel="noopener no referrer">Delete model from repository</a> <a id="summary"></a> ## 7. Summary and next steps You successfully completed this notebook!. You learned how to use `cURL` calls to store, deploy and score a scikit-learn ML model in WML. ### Authors **Amadeusz Masny**, Python Software Developer in Watson Machine Learning at IBM **Jan Sołtysik**, Intern in Watson Machine Learning at IBM Copyright © 2020, 2021 IBM. This notebook and its source code are released under the terms of the MIT License.
github_jupyter
%env API_KEY=... %env WML_ENDPOINT_URL=... %env WML_INSTANCE_CRN="fill out only if you want to create a new space" %env WML_INSTANCE_NAME=... %env COS_CRN="fill out only if you want to create a new space" %env COS_ENDPOINT=... %env COS_BUCKET=... %env COS_ACCESS_KEY_ID=... %env COS_SECRET_ACCESS_KEY=... %env COS_API_KEY=... %env SPACE_ID="fill out only if you have space already created" %env DATAPLATFORM_URL=https://api.dataplatform.cloud.ibm.com %env AUTH_ENDPOINT=https://iam.cloud.ibm.com/oidc/token %%bash --out token curl -sk -X POST \ --header "Content-Type: application/x-www-form-urlencoded" \ --header "Accept: application/json" \ --data-urlencode "grant_type=urn:ibm:params:oauth:grant-type:apikey" \ --data-urlencode "apikey=$API_KEY" \ "$AUTH_ENDPOINT" \ | cut -d '"' -f 4 %env TOKEN=$token %%bash --out model_payload MODEL_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "scikit_diabetes_model","description": "This is description","type": "scikit-learn_0.23", "software_spec": {"name": "default_py3.7"}}' echo $MODEL_PAYLOAD | python -m json.tool %env MODEL_PAYLOAD=$model_payload %%bash --out model_id -s "$model_payload" curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$MODEL_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models?version=2020-08-01" | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 2p %env MODEL_ID=$model_id %%bash --out revision_payload REVISION_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "commit_message": "Initial model."}' echo $REVISION_PAYLOAD | python -m json.tool %env REVISION_PAYLOAD=$revision_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REVISION_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?version=2020-08-01" \ | python -m json.tool !wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/scikit/diabetes/model/diabetes_model.tar.gz \ -O diabetes_model.tar.gz %%bash --out attachment_id curl -sk -X PUT \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/gzip" \ --header "Accept: application/json" \ --data-binary "@diabetes_model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/content?space_id=$SPACE_ID&version=2020-08-01&content_format=native" \ | grep "attachment_id" | awk -F '"' '{ print $4 }' %env ATTACHMENT_ID=$attachment_id %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --output "model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/download?space_id=$SPACE_ID&version=2020-08-01" !ls -l model.tar.gz %%bash --out deployment_payload DEPLOYMENT_PAYLOAD='{"space_id": "'"$SPACE_ID"'","name": "Diabetes deployment", "description": "This is description","online": {},"hardware_spec": {"name": "S"},"asset": {"id": "'"$MODEL_ID"'"}}' echo $DEPLOYMENT_PAYLOAD | python -m json.tool %env DEPLOYMENT_PAYLOAD=$deployment_payload %%bash --out deployment_id curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$DEPLOYMENT_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments?version=2020-08-01" | grep '"id": ' | awk -F '"' '{ print $4 }' | sed -n 3p %env DEPLOYMENT_ID=$deployment_id %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID?space_id=$SPACE_ID&version=2020-08-01" \ | 
python -m json.tool %%bash --out scoring_payload SCORING_PAYLOAD='{"space_id": "$SPACE_ID","input_data": [{"fields": ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"], "values": [[-0.00188201652779104, -0.044641636506989, -0.0514740612388061, -0.0263278347173518, -0.00844872411121698, -0.019163339748222, 0.0744115640787594, -0.0394933828740919, -0.0683297436244215, -0.09220404962683], [0.0852989062966783, 0.0506801187398187, 0.0444512133365941, -0.00567061055493425, -0.0455994512826475, -0.0341944659141195, -0.0323559322397657, -0.00259226199818282, 0.00286377051894013, -0.0259303389894746]]}]}' echo $SCORING_PAYLOAD | python -m json.tool %env SCORING_PAYLOAD=$scoring_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$SCORING_PAYLOAD"\ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID/predictions?version=2020-08-01" \ | python -m json.tool %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ "$WML_ENDPOINT_URL/ml/v4/deployments?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool %%bash --out revision_payload REVISION_PAYLOAD='{"space_id": "'"$SPACE_ID"'", "commit_message": "Updated model."}' echo $REVISION_PAYLOAD | python -m json.tool %env REVISION_PAYLOAD=$revision_payload %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REVISION_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?version=2020-08-01" \ | python -m json.tool %%bash --out update_payload echo '[{"op": "add", "path": "/name", "value": "updated scikit model"}]' | python -m json.tool %env UPDATE_PAYLOAD=$update_payload %%bash curl -sk -X PATCH \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$UPDATE_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool !wget https://github.com/IBM/watson-machine-learning-samples/raw/master/cloud/models/scikit/diabetes/model/new_diabetes_model.tar.gz \ -O new_diabetes_model.tar.gz %%bash curl -sk -X PUT \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/gzip" \ --header "Accept: application/json" \ --data-binary "@new_diabetes_model.tar.gz" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/content?space_id=$SPACE_ID&version=2020-08-01&content_format=native" \ | python -m json.tool %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID/revisions?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool %%bash curl -sk -X GET \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ "$WML_ENDPOINT_URL/ml/v4/models/$MODEL_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool %%bash --out redeploy_payload echo '[{"op": "replace", "path": "/asset", "value": {"id": "'"$MODEL_ID"'"}}]' \ | python -m json.tool %env 
REDEPLOY_PAYLOAD=$redeploy_payload %%bash curl -sk -X PATCH \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$REDEPLOY_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID?space_id=$SPACE_ID&version=2020-08-01" \ | python -m json.tool %%bash curl -sk -X POST \ --header "Authorization: Bearer $TOKEN" \ --header "Content-Type: application/json" \ --header "Accept: application/json" \ --data "$SCORING_PAYLOAD" \ "$WML_ENDPOINT_URL/ml/v4/deployments/$DEPLOYMENT_ID/predictions?version=2020-08-01" \ | python -m json.tool
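For readers who prefer to call the scoring endpoint from Python rather than curl, here is a minimal sketch (not part of the original notebook) using the `requests` library. It assumes the same `TOKEN`, `WML_ENDPOINT_URL` and `DEPLOYMENT_ID` environment variables already set via the `%env` cells above, and reuses the `input_data` payload format shown in the scoring example; `requests` being installed is an assumption.

```
import os
import requests

# Reuse the environment variables configured earlier in this notebook (assumed set)
token = os.environ["TOKEN"]
endpoint = os.environ["WML_ENDPOINT_URL"]
deployment_id = os.environ["DEPLOYMENT_ID"]

# Same field names and first row of values as the curl scoring payload above
payload = {
    "input_data": [
        {
            "fields": ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"],
            "values": [[-0.00188201652779104, -0.044641636506989, -0.0514740612388061,
                        -0.0263278347173518, -0.00844872411121698, -0.019163339748222,
                        0.0744115640787594, -0.0394933828740919, -0.0683297436244215,
                        -0.09220404962683]],
        }
    ]
}

# POST to the same predictions endpoint used by the curl command above
response = requests.post(
    f"{endpoint}/ml/v4/deployments/{deployment_id}/predictions",
    params={"version": "2020-08-01"},
    headers={"Authorization": f"Bearer {token}", "Content-Type": "application/json"},
    json=payload,
)
print(response.json())
```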
0.317426
0.92297
# The `StreamPowerSmoothThresholdEroder` component

Landlab's `StreamPowerSmoothThresholdEroder` (here SPSTE for short) is a fluvial landscape evolution component that uses a thresholded form of the stream power erosion law. The novel aspect is that the threshold takes a smoothed form rather than an abrupt mathematical discontinuity: as long as slope and drainage area are greater than zero, there is always *some* erosion rate even if the erosive potential function is below the nominal threshold value. This approach is motivated by the finding that mathematically discontinuous functions in numerical models can lead to "numerical daemons": non-smooth functional behavior that can greatly complicate optimization (Clark & Kavetski, 2010; Kavetski & Clark, 2010, 2011). The SPSTE is one of the fluvial erosion components used in the *terrainBento* collection of landscape evolution models (Barnhart et al., 2019). This tutorial provides a brief overview of how to use the SPSTE component. *(G.E. Tucker, 2021)*

## Theory

The SPSTE formulation is as follows. Consider a location on a stream channel that has local downstream slope gradient $S$ and drainage area $A$. We define an erosion potential function $\omega$ as

$$\omega = KA^mS^n$$

where $K$ is an erodibility coefficient with dimensions of $[L^{(1-2m)}/T]$. The erosion potential function has dimensions of erosion (lowering) rate, [L/T], and it represents the rate of erosion that would occur if there were no threshold term. The expression takes the form of the familiar area-slope erosion law, also known as the "stream power law" because the exponents can be configured to represent an erosion law that depends on stream power per unit bed area (Whipple & Tucker, 1999). A common choice of exponents is $m=1/2$, $n=1$, but other combinations are possible depending on one's assumptions about process, hydrology, channel geometry, and other factors (e.g., Howard et al., 1994; Whipple et al., 2000).

We also define a threshold erosion potential function, $\omega_c$, below which the erosion rate declines precipitously. Given these definitions, a mathematically discontinuous threshold erosion function would look like this:

$$E = \max (\omega - \omega_c, 0)$$

This kind of formulation is mathematically simple, and given data on $E$ and $\omega$, one could easily find $K$ and $\omega_c$ empirically by fitting a line. Yet even in the case of sediment transport, where the initial motion of grains is usually represented by a threshold shear stress (often referred to as the *critical shear stress* for initiation of sediment motion), we know that *some* transport still occurs below the nominal threshold (e.g., Wilcock & McArdell, 1997). Although it is undeniably true that the rate of sediment transport declines rapidly when the average shear stress drops below a critical value, the strictly linear-with-threshold formulation is really more of a convenient mathematical fiction than an accurate reflection of geophysical reality. In bed-load sediment transport, reality seems to be smoother than this mathematical fiction, if one considers transport rates averaged over a suitably long time period. The same is likely true for the hydraulic detachment and removal of cohesive/rocky material as well. Furthermore, as alluded to above, a strict threshold expression for transport or erosion can create numerical daemons that complicate model analysis.
To avoid the mathematical discontinuity at $\omega=\omega_c$, SPSTE uses a smoothed version of the above function:

$$E = \omega - \omega_c \left( 1 - e^{-\omega / \omega_c} \right)$$

The code below generates a plot that compares the strict threshold and smooth threshold erosion laws.

```
import numpy as np
import matplotlib.pyplot as plt

from landlab import RasterModelGrid, imshow_grid
from landlab.components import FlowAccumulator, StreamPowerSmoothThresholdEroder

omega = np.arange(0, 5.01, 0.01)
omegac = 1.0
Eabrupt = np.maximum(omega - omegac, 0.0)
Esmooth = omega - omegac * (1.0 - np.exp(-omega / omegac))

plt.plot(omega, Esmooth, "k", label="Smoothed threshold")
plt.plot(omega, Eabrupt, "k--", label="Hard threshold")
plt.plot([1.0, 1.0], [0.0, 4.0], "g:", label=r"$\omega=\omega_c$")
plt.xlabel(r"Erosion potential function ($\omega$)")
plt.ylabel("Erosion rate")
plt.legend()
```

Notice that the SPSTE formulation effectively smooths over the sharp discontinuity at $\omega = \omega_c$.

### Equilibrium

Consider a case of steady, uniform fluvial erosion. Let the ratio of the erosion potential function to its threshold value be a constant,

$$\beta = \omega / \omega_c$$

This allows us to replace instances of $\omega_c$ with $(1/\beta) \omega$:

$$E = KA^m S^n - \frac{1}{\beta} KA^m S^n \left( 1 - e^{-\beta} \right)$$

$$ = K A^m S^n \left( 1 - \frac{1}{\beta} \left( 1 - e^{-\beta} \right)\right)$$

Let

$$\alpha = \left( 1 - \frac{1}{\beta} \left( 1 - e^{-\beta} \right)\right)$$

Then we can solve for the steady-state slope as

$$\boxed{S = \left( \frac{E}{\alpha K A^m} \right)^{1/n}}$$

We can relate $\beta$ and $\omega_c$ via

$$\omega_c = \frac{E}{\beta - \left( 1 - e^{-\beta} \right)}$$

(since $E = \alpha\omega = \alpha\beta\omega_c$, and $\alpha\beta = \beta - (1 - e^{-\beta})$).

## Usage

Here we get a summary of the component's usage and input parameters by printing out the component's header docstring:

```
print(StreamPowerSmoothThresholdEroder.__doc__)
```

## Example

Here we'll run a steady-state example with $\beta = 1$. To do this, we'll start with a slightly inclined surface with some superimposed random noise, and subject it to a steady rate of rock uplift relative to baselevel, $U$, until it reaches a steady state.

```
# Parameters
K = 0.0001  # erodibility coefficient, 1/yr
m = 0.5  # drainage area exponent
beta = 1.0  # ratio of w / wc [-]
uplift_rate = 0.001  # rate of uplift relative to baselevel, m/yr
nrows = 16  # number of grid rows (small for speed)
ncols = 25  # number of grid columns (")
dx = 100.0  # grid spacing, m
dt = 1000.0  # time-step duration, yr
run_duration = 2.5e5  # duration of run, yr
init_slope = 0.001  # initial slope gradient of topography, m/m
noise_amplitude = 0.1  # amplitude of random noise on init. topo.
# Derived parameters
omega_c = uplift_rate / (beta - (1 - np.exp(-beta)))
nsteps = int(run_duration / dt)

# Create grid and elevation field with initial ramp
grid = RasterModelGrid((nrows, ncols), xy_spacing=dx)
grid.set_closed_boundaries_at_grid_edges(True, True, True, False)
elev = grid.add_zeros("topographic__elevation", at="node")
elev[:] = init_slope * grid.y_of_node
np.random.seed(0)
elev[grid.core_nodes] += noise_amplitude * np.random.rand(grid.number_of_core_nodes)

# Display starting topography
imshow_grid(grid, elev)

# Instantiate the two components
# (note that m=0.5, n=1 are the defaults for SPSTE)
fa = FlowAccumulator(grid, flow_director="D8")
spste = StreamPowerSmoothThresholdEroder(grid, K_sp=K, threshold_sp=omega_c)

# Run the model
for i in range(nsteps):

    # flow accumulation
    fa.run_one_step()

    # uplift / baselevel
    elev[grid.core_nodes] += uplift_rate * dt

    # erosion
    spste.run_one_step(dt)

# Display the final topography
imshow_grid(grid, elev)

# Calculate the analytical solution in slope-area space
alpha = 1.0 - (1.0 / beta) * (1.0 - np.exp(-beta))
area_pred = np.array([1.0e4, 1.0e6])
slope_pred = uplift_rate / (alpha * K * area_pred ** m)

# Plot the slope-area relation and compare with analytical
area = grid.at_node["drainage_area"]
slope = grid.at_node["topographic__steepest_slope"]
cores = grid.core_nodes
plt.loglog(area[cores], slope[cores], "k.")
plt.plot(area_pred, slope_pred)
plt.legend(["Numerical", "Analytical"])
plt.title("Equilibrium slope-area relation")
plt.xlabel(r"Drainage area (m$^2$)")
_ = plt.ylabel("Slope (m/m)")
```

The above plot shows that the simulation has reached steady state, and that the slope-area relation matches the analytical solution.

We can also inspect the erosion potential function, which should be uniform in space, and (because $\beta = 1$ in this example) equal to the threshold $\omega_c$. We can also compare this with the uplift rate and the erosion-rate function:

```
# Plot the erosion potential function
omega = K * area[cores] ** m * slope[cores]
plt.plot([0.0, 1.0e6], [omega_c, omega_c], "g", label=r"$\omega_c$")
plt.plot(area[cores], omega, ".", label=r"$\omega$")
plt.plot([0.0, 1.0e6], [uplift_rate, uplift_rate], "r", label=r"$U$")
erorate = omega - omega_c * (1.0 - np.exp(-omega / omega_c))
plt.plot(
    area[cores], erorate, "k+", label=r"$\omega - \omega_c (1 - e^{-\omega/\omega_c})$"
)
plt.ylim([0.0, 2 * omega_c])
plt.legend()
plt.title("Erosion potential function vs. threshold term")
plt.xlabel(r"Drainage area (m$^2$)")
_ = plt.ylabel("Erosion potential function (m/yr)")
```

The above plot illustrates how the SPSTE allows erosion to occur even when the erosion potential lies at or below the nominal threshold.

## References

Barnhart, K. R., Glade, R. C., Shobe, C. M., & Tucker, G. E. (2019). Terrainbento 1.0: a Python package for multi-model analysis in long-term drainage basin evolution. Geoscientific Model Development, 12(4), 1267-1297.

Clark, M. P., & Kavetski, D. (2010). Ancient numerical daemons of conceptual hydrological modeling: 1. Fidelity and efficiency of time stepping schemes. Water Resources Research, 46(10).

Howard, A. D., Dietrich, W. E., & Seidl, M. A. (1994). Modeling fluvial erosion on regional to continental scales. Journal of Geophysical Research: Solid Earth, 99(B7), 13971-13986.

Kavetski, D., & Clark, M. P. (2010). Ancient numerical daemons of conceptual hydrological modeling: 2. Impact of time stepping schemes on model analysis and prediction. Water Resources Research, 46(10).
Kavetski, D., & Clark, M. P. (2011). Numerical troubles in conceptual hydrology: Approximations, absurdities and impact on hypothesis testing. Hydrological Processes, 25(4), 661-670.

Whipple, K. X., Hancock, G. S., & Anderson, R. S. (2000). River incision into bedrock: Mechanics and relative efficacy of plucking, abrasion, and cavitation. Geological Society of America Bulletin, 112(3), 490-503.

Whipple, K. X., & Tucker, G. E. (1999). Dynamics of the stream‐power river incision model: Implications for height limits of mountain ranges, landscape response timescales, and research needs. Journal of Geophysical Research: Solid Earth, 104(B8), 17661-17674.

Wilcock, P. R., & McArdell, B. W. (1997). Partial transport of a sand/gravel sediment. Water Resources Research, 33(1), 235-245.
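As a quick, self-contained check of the equilibrium relations derived in the Theory section (this sketch is an addition, not part of the original tutorial), the cell below recomputes $\alpha$ and $\omega_c$ from $\beta$ and the uplift rate used in the example, then confirms that the smoothed erosion law returns $E = U$ at the predicted steady-state slope. The drainage area value is arbitrary; the other parameters simply repeat those of the example.

```
import numpy as np

# Example parameters (same values as above)
K = 0.0001      # erodibility coefficient, 1/yr
m = 0.5         # drainage area exponent
n = 1.0         # slope exponent
beta = 1.0      # ratio omega / omega_c
U = 0.001       # uplift rate, m/yr

# Equilibrium relations from the Theory section
alpha = 1.0 - (1.0 / beta) * (1.0 - np.exp(-beta))
omega_c = U / (beta - (1.0 - np.exp(-beta)))

# Steady-state slope at an arbitrary drainage area
A = 1.0e5  # m^2
S = (U / (alpha * K * A ** m)) ** (1.0 / n)

# Evaluate the smoothed erosion law at (A, S); it should recover E = U
omega = K * A ** m * S ** n
E = omega - omega_c * (1.0 - np.exp(-omega / omega_c))
print(np.isclose(E, U))  # True
```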
github_jupyter
import numpy as np import matplotlib.pyplot as plt from landlab import RasterModelGrid, imshow_grid from landlab.components import FlowAccumulator, StreamPowerSmoothThresholdEroder omega = np.arange(0, 5.01, 0.01) omegac = 1.0 Eabrupt = np.maximum(omega - omegac, 0.0) Esmooth = omega - omegac * (1.0 - np.exp(-omega / omegac)) plt.plot(omega, Esmooth, "k", label="Smoothed threshold") plt.plot(omega, Eabrupt, "k--", label="Hard threshold") plt.plot([1.0, 1.0], [0.0, 4.0], "g:", label=r"$\omega=\omega_c$") plt.xlabel(r"Erosion potential function ($\omega$)") plt.ylabel("Erosion rate") plt.legend() print(StreamPowerSmoothThresholdEroder.__doc__) # Parameters K = 0.0001 # erodibility coefficient, 1/yr m = 0.5 # drainage area exponent beta = 1.0 # ratio of w / wc [-] uplift_rate = 0.001 # rate of uplift relative to baselevel, m/yr nrows = 16 # number of grid rows (small for speed) ncols = 25 # number of grid columns (") dx = 100.0 # grid spacing, m dt = 1000.0 # time-step duration, yr run_duration = 2.5e5 # duration of run, yr init_slope = 0.001 # initial slope gradient of topography, m/m noise_amplitude = 0.1 # amplitude of random noise on init. topo. # Derived parameters omega_c = uplift_rate / (beta - (1 - np.exp(-beta))) nsteps = int(run_duration / dt) # Create grid and elevation field with initial ramp grid = RasterModelGrid((nrows, ncols), xy_spacing=dx) grid.set_closed_boundaries_at_grid_edges(True, True, True, False) elev = grid.add_zeros("topographic__elevation", at="node") elev[:] = init_slope * grid.y_of_node np.random.seed(0) elev[grid.core_nodes] += noise_amplitude * np.random.rand(grid.number_of_core_nodes) # Display starting topography imshow_grid(grid, elev) # Instantiate the two components # (note that m=0.5, n=1 are the defaults for SPSTE) fa = FlowAccumulator(grid, flow_director="D8") spste = StreamPowerSmoothThresholdEroder(grid, K_sp=K, threshold_sp=omega_c) # Run the model for i in range(nsteps): # flow accumulation fa.run_one_step() # uplift / baselevel elev[grid.core_nodes] += uplift_rate * dt # erosion spste.run_one_step(dt) # Display the final topopgraphy imshow_grid(grid, elev) # Calculate the analytical solution in slope-area space alpha = 1.0 - (1.0 / beta) * (1.0 - np.exp(-beta)) area_pred = np.array([1.0e4, 1.0e6]) slope_pred = uplift_rate / (alpha * K * area_pred ** m) # Plot the slope-area relation and compare with analytical area = grid.at_node["drainage_area"] slope = grid.at_node["topographic__steepest_slope"] cores = grid.core_nodes plt.loglog(area[cores], slope[cores], "k.") plt.plot(area_pred, slope_pred) plt.legend(["Numerical", "Analytical"]) plt.title("Equilibrium slope-area relation") plt.xlabel(r"Drainage area (m$^2$)") _ = plt.ylabel("Slope (m/m)") # Plot the erosion potential function omega = K * area[cores] ** m * slope[cores] plt.plot([0.0, 1.0e6], [omega_c, omega_c], "g", label=r"$\omega_c$") plt.plot(area[cores], omega, ".", label=r"$\omega$") plt.plot([0.0, 1.0e6], [uplift_rate, uplift_rate], "r", label=r"$U$") erorate = omega - omega_c * (1.0 - np.exp(-omega / omega_c)) plt.plot( area[cores], erorate, "k+", label=r"$\omega - \omega_c (1 - e^{-\omega/\omega_c})$" ) plt.ylim([0.0, 2 * omega_c]) plt.legend() plt.title("Erosion potential function vs. threshold term") plt.xlabel(r"Drainage area (m$^2$)") _ = plt.ylabel("Erosion potential function (m/yr)")
0.868074
0.991969
# Angers School of AI - Reinforcement Learning

---

You are welcome to read and use this notebook to get a first idea of what reinforcement learning is and how to use it!

### 1. Reinforcement Learning vs Supervised Learning / Unsupervised Learning

![DataScienceSchema.PNG](attachment:DataScienceSchema.PNG)

***

## References and Tools

#### Training :
> - **Deep Reinforcement Learning Nanodegree** from Udacity. An excellent training program on Deep Reinforcement Learning

#### Tool :
> - **OpenAI Gym**, an open-source toolkit created by OpenAI for developing and comparing reinforcement learning (RL) algorithms

#### Cheat sheet :
> - **https://github.com/udacity/deep-reinforcement-learning/blob/master/cheatsheet/cheatsheet.pdf**. It contains Reinforcement Learning algorithms.

#### Books :
> - **Reinforcement Learning: An Introduction** by Richard S. Sutton and Andrew G. Barto. This book is a classic text with an excellent introduction to reinforcement learning fundamentals
> - **Grokking Deep Reinforcement Learning** by Miguel Morales.

***

## An example of reinforcement learning : Playing Quarto

![Quarto2.png](attachment:Quarto2.png)

#### Game rules :
> - **4*4 board**
> - **16 different pieces**, each either tall or short, black or white, hollow top or solid top, square or circular
> - **Start of the game** : the first player selects a piece that the second player must place on the board.
> - **Next steps** : Then the second player selects one of the remaining pieces that the first player must place on the board, and so on...
> - **Who wins ?** : A player wins by placing a piece on the board which forms a horizontal, vertical, or diagonal row of four pieces, all of which have **at least one** common attribute (all short, all circular, etc...)

***

## Main concepts of reinforcement learning

> - The environment : it defines a world that an agent is able to interact with.
> - The agent : he needs to learn how to achieve goals by interacting with the environment
> - A state : it's like a picture of the environment.
> - A reward : it's the feedback given by the environment when the agent decides to execute an action.

![RLSchema.PNG](attachment:RLSchema.PNG)

(Source: Sutton and Barto, 2017)

At a given state, the agent interacts with the environment by executing a possible action. Then the environment informs the agent of the new state after having applied this action and gives the corresponding reward for this change.

#### Quarto example :
> - **our environment** contains :
> > - 16 pieces (with a number assigned to each of them, from 0 to 15)
> > - the board_state : an array of 17 cells. Cells 0 to 15 represent the board and cell 16 corresponds to the piece selected by the previous player. This piece is not yet on the board. ![quarto_board.PNG](attachment:quarto_board.PNG)
> > each cell contains the number of the piece positioned on it, -1 if empty
> > - The action space (list of possible actions)
> > - The list of empty positions on the board
> > - The list of remaining pieces not selected by a player.
> - **an agent** has the role of one of the two players. Either he will start the game or play second.
> - **a state** is a board_state of the environment
> - **reward** : **0** if the action doesn't lead to the end of the game, **100** if the agent wins the game after his action, **-100** if the agent loses after the next action of his opponent. In case of a draw, the reward is equal to 0
> - **an action** is in reality a couple of actions. The first one defines where to position the piece given by the opponent.
The second one defines the piece that the agent will give to the opponent. In our environment, the couple is transformed into an integer between 0 and 256.

## Remarks

In this presentation, we focus on a **fully observable environment** (as opposed to a partially observable environment, like a self-driving car). We also have **deterministic transitions**, meaning that applying an action to a specific state will always result in the same new state. We have a **finite list of actions**. Finally, we are dealing with an **episodic task** (meaning that we have a well-defined starting and ending point - a **terminal state**), as opposed to **continuous tasks** like self-driving cars.

---

(Source: https://joshgreaves.com/reinforcement-learning/understanding-rl-the-bellman-equations/)

## Reward and Return

RL agents learn to maximize cumulative future reward. The word used to describe cumulative future reward is return and is often denoted with R. We also use a subscript t to give the return from a certain time step. In mathematical notation, it looks like this:

![Return.PNG](attachment:Return.PNG)

More common than using future cumulative reward as return is using future cumulative discounted reward:

![DiscountedReturn.PNG](attachment:DiscountedReturn.PNG)

where 0 < $\gamma$ < 1. The two benefits of defining return this way are that the return is well defined for infinite series, and that it gives a greater weight to sooner rewards, meaning that we care more about imminent rewards and less about rewards we will receive further in the future. The smaller the value we select for $\gamma$, the more true this is. This can be seen in the special cases where we let $\gamma$ equal 0 or 1. If $\gamma$ is 1, we arrive back at our first equation where we care about all rewards equally, no matter how far into the future they are. On the other hand, when $\gamma$ is 0 we care only about the immediate reward, and do not care about any reward after that. This would lead our algorithm to be extremely short-sighted. It would learn to take the action that is best for that moment, but won’t take into account the effects that action will have on its future.

> **Example:**
> ![GrilleShortestPath.PNG](attachment:GrilleShortestPath.PNG)
> Rewards :
> - -1 for each move
> - -3 for the mountain
> - 10 for achieving the goal

## Policy

A policy defines for each state of the environment the action to choose. The objective of reinforcement learning is to find the best policy for our agent in order to maximize the cumulative rewards. Sometimes it may be better to sacrifice immediate reward (reward at time step Rₜ) to gain more long-term reward. To learn the optimal policy, we make use of value functions. There are two types of value functions that are used in reinforcement learning: the state value function, denoted V(s), and the action value function, denoted Q(s, a).

![PolicyShortestPath.PNG](attachment:PolicyShortestPath.PNG)

## State Value Function

The state value function describes the value of a state when following a policy. It is the expected return when starting from state s acting according to our policy π:

![BellmanEquation.PNG](attachment:BellmanEquation.PNG)

It is important to note that even for the same environment **the value function changes depending on the policy**. This is because the value of the state changes depending on how you act, since the way that you act in that particular state affects how much reward you expect to see. Also note the importance of the expectation.
(As a refresher, an expectation is much like a mean; it is literally what return you expect to see.) The reason we use an expectation is that there is some randomness in what happens after you arrive at a state. You may have a stochastic policy, which means we need to combine the results of all the different actions that we take. Also, the transition function can be stochastic, meaning that we may not end up in any state with 100% probability. Remember in the example above: when you select an action, the environment returns the next state. There may be multiple states it could return, even given one action. We will see more of this as we look at the Bellman equations. The expectation takes all of this randomness into account.

![StateValueFunctionExample.PNG](attachment:StateValueFunctionExample.PNG)

![StateValueFunctionExample2.PNG](attachment:StateValueFunctionExample2.PNG)

## Optimal Policy

There always exists at least one policy that's better than or equal to all other policies. We call this policy an optimal policy **π***. It's guaranteed to exist but it may not be unique. All optimal policies have the same state value function, which is denoted **v***

## Action Value Function

The other value function we will use is the action value function. The action value function tells us the value of taking an action in some state when following a certain policy. It is the expected return given the state and action under π:

![ActionValueFunction.PNG](attachment:ActionValueFunction.PNG)

The same notes for the state value function apply to the action value function. The expectation takes into account the randomness in future actions according to the policy, as well as the randomness of the returned state from the environment.

![ActionValueFunctionSchema.PNG](attachment:ActionValueFunctionSchema.PNG)

The optimal action value function is denoted **q***

## Bellman Equations

Bellman equation for the state value function:

![BellmanEquationState.PNG](attachment:BellmanEquationState.PNG)

Bellman equation for the action value function:

![BellmanEquationAction.PNG](attachment:BellmanEquationAction.PNG)

***

## Reinforcement Learning Algorithms

An interesting link : **https://github.com/udacity/deep-reinforcement-learning/blob/master/cheatsheet/cheatsheet.pdf**

Given the **q*** action value function, it's possible to find the best policy by choosing for each state the action that gives the best return. So, the main idea is to find this **q*** by interacting with the environment and getting feedback depending on the action chosen.

**An episode** is a succession of actions from a starting state to a final state. (In the quarto example, an episode corresponds to a game)

**First visit vs Every visit** algorithm (see the link above)

### Greedy Policy

![greedyPolicy.PNG](attachment:greedyPolicy.PNG)

(source from Udacity)

### Epsilon Greedy Policy

The epsilon greedy policy is nearly the same as the greedy policy.

![epsilonGreedyPolicy.PNG](attachment:epsilonGreedyPolicy.PNG)

(source from Udacity)

### Exploration vs Exploitation

Depending on the value of epsilon, we lean more towards exploration or exploitation. With an epsilon value equal to one, we are in an **exploration** "process". With a value close to zero, we are in an **exploitation** "process". In order to converge, the epsilon value should decay to a small positive number.

## Temporal-Difference Methods

The main difference with these methods is that we **do not wait for the end of an episode to update the Q-table**. The Q-table is updated after every time step.
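To make the greedy / epsilon-greedy distinction concrete, here is a minimal sketch (an addition, not from the original notebook) of epsilon-greedy action selection over a Q-table stored as a NumPy array; the array shape and variable names are illustrative assumptions.

```
import numpy as np

def epsilon_greedy_action(Q, state, epsilon):
    """Pick an action for `state` from a Q-table of shape (n_states, n_actions).

    With probability epsilon we explore (random action); otherwise we exploit
    (greedy action, i.e. the one with the highest estimated value).
    """
    n_actions = Q.shape[1]
    if np.random.rand() < epsilon:
        return np.random.randint(n_actions)   # exploration
    return int(np.argmax(Q[state]))           # exploitation

# Toy example: a Q-table with 5 states and 4 actions, epsilon = 0.1
Q = np.zeros((5, 4))
action = epsilon_greedy_action(Q, state=0, epsilon=0.1)
```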
Here is a [link](https://github.com/udacity/deep-reinforcement-learning/blob/master/cheatsheet/cheatsheet.pdf) for more details on the Sarsa, Q-learning (Sarsamax) and Expected Sarsa algorithms.

![Sarsa%20Algorithm.PNG](attachment:Sarsa%20Algorithm.PNG)

Sarsa and Expected Sarsa are **on-policy** TD control methods. Q-learning is an **off-policy** TD control method.
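As an illustration of the off-policy Q-learning (Sarsamax) update described above, here is a minimal tabular sketch (an addition, not part of the original notebook). It assumes a classic Gym-style environment `env` with discrete state and action spaces and the pre-0.26 `reset()`/`step()` API, and it reuses the `epsilon_greedy_action` helper sketched earlier.

```
import numpy as np

def q_learning(env, n_episodes, alpha=0.1, gamma=0.99, epsilon=0.1):
    """Tabular Q-learning (Sarsamax): off-policy TD control."""
    Q = np.zeros((env.observation_space.n, env.action_space.n))
    for _ in range(n_episodes):
        state = env.reset()
        done = False
        while not done:
            action = epsilon_greedy_action(Q, state, epsilon)
            next_state, reward, done, info = env.step(action)
            # The TD target uses the *greedy* action in the next state (off-policy);
            # (not done) zeroes out the bootstrap term at a terminal state.
            td_target = reward + gamma * np.max(Q[next_state]) * (not done)
            Q[state, action] += alpha * (td_target - Q[state, action])
            state = next_state
    return Q
```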
github_jupyter
0.943764
0.992047
### Librerias utilizadas ``` import pandas as pd import matplotlib.pyplot as plt import ipywidgets as widgets from ipywidgets import interact from area import area import ipydatetime ``` ### Histograma de tiempos de viaje para un año dado ``` def histograma_por_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) duration = df["duration_sec"] plt.hist(duration, bins=20 ,histtype='bar', edgecolor='k') plt.xlabel('Duracion del viaje(segundos)') plt.ylabel('Numero de viajes') plt.title('Histograma de tiempos de viaje año {}'.format(año)) return plt.show() interact(histograma_por_año, año=[2014,2015,2016,2017]) ``` ### Listado del Top N de estaciones más utilizadas para un año dado. Dividirlo en: - Estaciones de salida - Estaciones de llegada - En general ``` def listado_top_estaciones_año(año,N): df = pd.read_csv("Data/OD_{}.csv".format(año)) start_st = pd.DataFrame({'station_code': df['start_station_code']}) end_st = pd.DataFrame({'station_code': df['end_station_code']}) general_st = start_st.append(end_st) start_st = start_st.groupby('station_code').size().reset_index(name='counts') start_st = start_st.sort_values(by=['counts'],ascending=False) start_st = start_st.head(int(N)) start_st.index = range(1, int(N)+1) end_st = end_st.groupby('station_code').size().reset_index(name='counts') end_st = end_st.sort_values(by=['counts'],ascending=False) end_st = end_st.head(int(N)) end_st.index = range(1, int(N)+1) general_st = general_st.groupby('station_code').size().reset_index(name='counts') general_st = general_st.sort_values(by=['counts'],ascending=False) general_st = general_st.head(int(N)) general_st.index = range(1, int(N)+1) informe = pd.concat([start_st,end_st,general_st],axis=1) informe.columns = ["start_station_code","start_count","end_station_code","end_count","general_station_code","general_count"] informe.to_csv("listado_top{}_estaciones_{}.csv".format(int(N),año),index=False) return informe interact(listado_top_estaciones_año, año=[2014,2015,2016,2017],N=(0.0,200.0,1)) ``` ### Listado del Top N de viajes más comunes para un año dado. Donde un viaje se define por su estación de salida y de llegada ``` def listado_top_viajes_año(año,N): df = pd.read_csv("Data/OD_{}.csv".format(año)) viajes = pd.DataFrame({'start_station_code': df['start_station_code'], 'end_station_code': df['end_station_code']}) viajes["viaje"] = viajes["start_station_code"].astype(str) + '-' + viajes["end_station_code"].astype(str) viajes = viajes.groupby('viaje').size().reset_index(name='counts') viajes = viajes.sort_values(by=['counts'],ascending=False) viajes = viajes.head(int(N)) viajes.index = range(1, int(N)+1) viajes.to_csv("listado_top{}_viajes_{}.csv".format(int(N),año),index=False) return viajes interact(listado_top_viajes_año, año=[2014,2015,2016,2017],N=(0.0,200.0,1)) ``` ### Identificación de horas punta para un año determinado sin tener en cuenta el día. Es decir, si es día de semana, fin de semana, festivo o temporada del año. ``` def histograma_horas_por_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) hours = pd.to_datetime(df["start_date"]).dt.hour plt.hist(hours, bins=24 ,histtype='bar', edgecolor='k') plt.xlabel('Hora del viage') plt.xticks(range(0,24)) plt.xlabel('Hora') plt.ylabel('Numero de viajes') plt.title('Histograma de horas puntas de viaje año {}'.format(año)) return plt.show() interact(histograma_horas_por_año, año=[2014,2015,2016,2017]) ``` ### Comparación de utilización del sistema entre dos años cualesquiera. 
La utilización del sistema se puede medir como: - Cantidad de viajes totales - Tiempo total de utilización del sistema - Cantidad de viajes por estaciones/bicicletas disponibles ``` def comparacion_sistema_año(año1,año2,medida): df1 = pd.read_csv("Data/OD_{}.csv".format(año1)) df2 = pd.read_csv("Data/OD_{}.csv".format(año2)) if medida=="viajes totales": plt.bar([str(año1),str(año2)],[len(df1),len(df2)]) plt.xlabel('Año') plt.ylabel('Numero de viajes') plt.title('Comparacion Nº viajes entre el {} y el {}'.format(año1,año2)) return plt.show() if medida=="tiempo de uso": uso1 = str(pd.Timedelta(df1["duration_sec"].sum(), unit ='s')) uso2 = str(pd.Timedelta(df2["duration_sec"].sum(), unit ='s')) usoTotal = pd.DataFrame({str(año1):uso1, str(año2):uso2},index=[0]) return usoTotal else: start_st1 = pd.DataFrame({'station_code': df1['start_station_code']}) end_st1 = pd.DataFrame({'station_code': df1['end_station_code']}) general_st1 = start_st1.append(end_st1) start_st2 = pd.DataFrame({'station_code': df2['start_station_code']}) end_st2 = pd.DataFrame({'station_code': df2['end_station_code']}) general_st2 = start_st2.append(end_st2) st1 = general_st1.groupby('station_code').size().reset_index(name='counts') st1.index = range(1, len(st1)+1) st2 = general_st2.groupby('station_code').size().reset_index(name='counts') st2.index = range(1, len(st2)+1) comp_est = pd.concat([st1,st2],axis=1) comp_est.columns = ["station_code_"+str(año1),"count_"+str(año1),"station_code_"+str(año2),"count_"+str(año2)] comp_est.to_csv("comparacion_{}_{}_{}.csv".format(año1,año2,medida),index=False) return comp_est interact(comparacion_sistema_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017], medida=["viajes totales","tiempo de uso", "cantidad de viajes por estacion"]) ``` ### Capacidad instalada total (suma de la capacidad total de cada estación) ``` stations = pd.read_json("Data/stations.json") capacity = pd.json_normalize(stations["stations"]) print("\nCapacidad total : {}".format(capacity['ba'].sum())) ``` ### Cambio en la capacidad instalada entre dos años puntuales ``` def comparacion_capacidad_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) stations = pd.read_json("Data/stations.json") capacity = pd.json_normalize(stations["stations"]) sum1 = capacity[capacity.n.astype(int).isin(df1["code"])] sum2 = capacity[capacity.n.astype(int).isin(df2["code"])] change = sum2['ba'].sum() - sum1['ba'].sum() print("El cambio de capacidad entre los años {} y {} ha sido de : {} unidades".format(año1,año2,change)) interact(comparacion_capacidad_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) ``` ### Ampliación de la cobertura de la red entre dos años puntuales. La misma se puede medir como el área total que generan las estaciones. 
``` def ampliacion_red_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) coordinates1 = [] for i in range(len(df1)): coordinates1.append([df1["latitude"][i],df1["longitude"][i]]) coordinates2 = [] for i in range(len(df2)): coordinates2.append([df2["latitude"][i],df2["longitude"][i]]) coordinates1.sort(key = lambda x: (-x[0], x[1])) coordinates2.sort(key = lambda x: (-x[0], x[1])) obj1 = {'type':'Polygon','coordinates':[coordinates1]} obj2 = {'type':'Polygon','coordinates':[coordinates2]} area1 = area(obj1) area2 = area(obj2) print("La diferencia entre la cobertura de la red entre el {} y el {} es: {} m".format(año1,año2,area2-area1)) interact(ampliacion_red_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) ``` ### Comparación de densidad de la red para un par de años puntuales. La densidad de la red se mide como el área que abarcan todas las estaciones, dividida la cantidad de estaciones. ``` def densidad_red_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) coordinates1 = [] for i in range(len(df1)): coordinates1.append([df1["latitude"][i],df1["longitude"][i]]) coordinates2 = [] for i in range(len(df2)): coordinates2.append([df2["latitude"][i],df2["longitude"][i]]) coordinates1.sort(key = lambda x: (-x[0], x[1])) coordinates2.sort(key = lambda x: (-x[0], x[1])) obj1 = {'type':'Polygon','coordinates':[coordinates1]} obj2 = {'type':'Polygon','coordinates':[coordinates2]} densidad1 = area(obj1) / len(df1) densidad2 = area(obj2) / len(df2) print("La diferencia entre la densidad de la red entre el {} y el {} es: {}".format(año1,año2,densidad2-densidad1)) interact(densidad_red_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) ``` ### Velocidad promedio de los ciclistas para un año determinado ``` from math import sin, cos, sqrt, asin, atan2, radians def velocidad_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) stations = pd.read_json("Data/stations.json") stations = pd.json_normalize(stations["stations"]) stations.n = stations.n.astype(int) start = pd.DataFrame({'n': df["start_station_code"]}) result_start = pd.merge(start,stations,on='n',how="left") end = pd.DataFrame({'n': df["end_station_code"]}) result_end = pd.merge(end,stations,on='n',how="left") combine = pd.concat([result_start[["n","id","la","lo"]],result_end[["n","id","la","lo"]],df["duration_sec"]],axis=1) combine.columns = ["start_station_code","id","start_station_la","start_station_lo", "end_station_code","end_station_id","end_station_la","end_station_lo","duration_sec"] R = 6373.0 lat1 = combine["start_station_la"].map(radians) lon1 = combine["start_station_lo"].map(radians) lat2 = combine["end_station_la"].map(radians) lon2 = combine["end_station_lo"].map(radians) dlon = lon2 - lon1 dlat = lat2 - lat1 a = (dlat/2).map(sin)**2 + lat1.map(cos) * lat2.map(cos) * (dlon/2).map(sin)**2 c = 2 * a.map(sqrt).map(asin) distance = R * c combine["speed"] = distance / (combine["duration_sec"]/3600) print("La velocidad media de los ciclistas en el año {} es: {} km/h".format(año,combine["speed"].mean())) interact(velocidad_año, año=[2014,2015,2016,2017]) ``` ### Cantidad de bicicletas totales para un momento dado. Considerando la misma como la cantidad de bicicletas que hay en todas las estaciones activas para ese momento, más todos los viajes que se estén realizando. 
``` from datetime import datetime import pytz datetime_picker = ipydatetime.DatetimePicker() def total_bikes_at_date(date): if date!=None: df = pd.read_csv("Data/OD_{}.csv".format(date.year)) df.start_date = pd.to_datetime(df.start_date) df.end_date = pd.to_datetime(df.end_date) date = pd.to_datetime(date).tz_localize(tz=None) total = len(df[(date>df.start_date) & (date<df.end_date)]) print("En el momento {} hay {} bicicletas".format(str(date),total)) else: return 0 interact(total_bikes_at_date,date=datetime_picker) ```
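As a small companion to the `velocidad_año` cell above, here is a minimal standalone sketch (an addition, not part of the original notebook) of the same great-circle (haversine) distance formula, which can be handy for sanity-checking individual station pairs; the coordinates in the example are illustrative.

```
from math import radians, sin, cos, asin, sqrt

def haversine_km(lat1, lon1, lat2, lon2, r=6373.0):
    """Great-circle distance in km between two (lat, lon) points given in degrees."""
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    a = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * r * asin(sqrt(a))

# Illustrative check: two points one degree of latitude apart are roughly 111 km apart
print(round(haversine_km(45.0, -73.6, 46.0, -73.6), 1))
```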
github_jupyter
import pandas as pd import matplotlib.pyplot as plt import ipywidgets as widgets from ipywidgets import interact from area import area import ipydatetime def histograma_por_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) duration = df["duration_sec"] plt.hist(duration, bins=20 ,histtype='bar', edgecolor='k') plt.xlabel('Duracion del viaje(segundos)') plt.ylabel('Numero de viajes') plt.title('Histograma de tiempos de viaje año {}'.format(año)) return plt.show() interact(histograma_por_año, año=[2014,2015,2016,2017]) def listado_top_estaciones_año(año,N): df = pd.read_csv("Data/OD_{}.csv".format(año)) start_st = pd.DataFrame({'station_code': df['start_station_code']}) end_st = pd.DataFrame({'station_code': df['end_station_code']}) general_st = start_st.append(end_st) start_st = start_st.groupby('station_code').size().reset_index(name='counts') start_st = start_st.sort_values(by=['counts'],ascending=False) start_st = start_st.head(int(N)) start_st.index = range(1, int(N)+1) end_st = end_st.groupby('station_code').size().reset_index(name='counts') end_st = end_st.sort_values(by=['counts'],ascending=False) end_st = end_st.head(int(N)) end_st.index = range(1, int(N)+1) general_st = general_st.groupby('station_code').size().reset_index(name='counts') general_st = general_st.sort_values(by=['counts'],ascending=False) general_st = general_st.head(int(N)) general_st.index = range(1, int(N)+1) informe = pd.concat([start_st,end_st,general_st],axis=1) informe.columns = ["start_station_code","start_count","end_station_code","end_count","general_station_code","general_count"] informe.to_csv("listado_top{}_estaciones_{}.csv".format(int(N),año),index=False) return informe interact(listado_top_estaciones_año, año=[2014,2015,2016,2017],N=(0.0,200.0,1)) def listado_top_viajes_año(año,N): df = pd.read_csv("Data/OD_{}.csv".format(año)) viajes = pd.DataFrame({'start_station_code': df['start_station_code'], 'end_station_code': df['end_station_code']}) viajes["viaje"] = viajes["start_station_code"].astype(str) + '-' + viajes["end_station_code"].astype(str) viajes = viajes.groupby('viaje').size().reset_index(name='counts') viajes = viajes.sort_values(by=['counts'],ascending=False) viajes = viajes.head(int(N)) viajes.index = range(1, int(N)+1) viajes.to_csv("listado_top{}_viajes_{}.csv".format(int(N),año),index=False) return viajes interact(listado_top_viajes_año, año=[2014,2015,2016,2017],N=(0.0,200.0,1)) def histograma_horas_por_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) hours = pd.to_datetime(df["start_date"]).dt.hour plt.hist(hours, bins=24 ,histtype='bar', edgecolor='k') plt.xlabel('Hora del viage') plt.xticks(range(0,24)) plt.xlabel('Hora') plt.ylabel('Numero de viajes') plt.title('Histograma de horas puntas de viaje año {}'.format(año)) return plt.show() interact(histograma_horas_por_año, año=[2014,2015,2016,2017]) def comparacion_sistema_año(año1,año2,medida): df1 = pd.read_csv("Data/OD_{}.csv".format(año1)) df2 = pd.read_csv("Data/OD_{}.csv".format(año2)) if medida=="viajes totales": plt.bar([str(año1),str(año2)],[len(df1),len(df2)]) plt.xlabel('Año') plt.ylabel('Numero de viajes') plt.title('Comparacion Nº viajes entre el {} y el {}'.format(año1,año2)) return plt.show() if medida=="tiempo de uso": uso1 = str(pd.Timedelta(df1["duration_sec"].sum(), unit ='s')) uso2 = str(pd.Timedelta(df2["duration_sec"].sum(), unit ='s')) usoTotal = pd.DataFrame({str(año1):uso1, str(año2):uso2},index=[0]) return usoTotal else: start_st1 = pd.DataFrame({'station_code': df1['start_station_code']}) 
end_st1 = pd.DataFrame({'station_code': df1['end_station_code']}) general_st1 = start_st1.append(end_st1) start_st2 = pd.DataFrame({'station_code': df2['start_station_code']}) end_st2 = pd.DataFrame({'station_code': df2['end_station_code']}) general_st2 = start_st2.append(end_st2) st1 = general_st1.groupby('station_code').size().reset_index(name='counts') st1.index = range(1, len(st1)+1) st2 = general_st2.groupby('station_code').size().reset_index(name='counts') st2.index = range(1, len(st2)+1) comp_est = pd.concat([st1,st2],axis=1) comp_est.columns = ["station_code_"+str(año1),"count_"+str(año1),"station_code_"+str(año2),"count_"+str(año2)] comp_est.to_csv("comparacion_{}_{}_{}.csv".format(año1,año2,medida),index=False) return comp_est interact(comparacion_sistema_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017], medida=["viajes totales","tiempo de uso", "cantidad de viajes por estacion"]) stations = pd.read_json("Data/stations.json") capacity = pd.json_normalize(stations["stations"]) print("\nCapacidad total : {}".format(capacity['ba'].sum())) def comparacion_capacidad_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) stations = pd.read_json("Data/stations.json") capacity = pd.json_normalize(stations["stations"]) sum1 = capacity[capacity.n.astype(int).isin(df1["code"])] sum2 = capacity[capacity.n.astype(int).isin(df2["code"])] change = sum2['ba'].sum() - sum1['ba'].sum() print("El cambio de capacidad entre los años {} y {} ha sido de : {} unidades".format(año1,año2,change)) interact(comparacion_capacidad_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) def ampliacion_red_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) coordinates1 = [] for i in range(len(df1)): coordinates1.append([df1["latitude"][i],df1["longitude"][i]]) coordinates2 = [] for i in range(len(df2)): coordinates2.append([df2["latitude"][i],df2["longitude"][i]]) coordinates1.sort(key = lambda x: (-x[0], x[1])) coordinates2.sort(key = lambda x: (-x[0], x[1])) obj1 = {'type':'Polygon','coordinates':[coordinates1]} obj2 = {'type':'Polygon','coordinates':[coordinates2]} area1 = area(obj1) area2 = area(obj2) print("La diferencia entre la cobertura de la red entre el {} y el {} es: {} m".format(año1,año2,area2-area1)) interact(ampliacion_red_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) def densidad_red_año(año1,año2): df1 = pd.read_csv("Data/Stations_{}.csv".format(año1)) df2 = pd.read_csv("Data/Stations_{}.csv".format(año2)) coordinates1 = [] for i in range(len(df1)): coordinates1.append([df1["latitude"][i],df1["longitude"][i]]) coordinates2 = [] for i in range(len(df2)): coordinates2.append([df2["latitude"][i],df2["longitude"][i]]) coordinates1.sort(key = lambda x: (-x[0], x[1])) coordinates2.sort(key = lambda x: (-x[0], x[1])) obj1 = {'type':'Polygon','coordinates':[coordinates1]} obj2 = {'type':'Polygon','coordinates':[coordinates2]} densidad1 = area(obj1) / len(df1) densidad2 = area(obj2) / len(df2) print("La diferencia entre la densidad de la red entre el {} y el {} es: {}".format(año1,año2,densidad2-densidad1)) interact(densidad_red_año, año1=[2014,2015,2016,2017],año2=[2014,2015,2016,2017]) from math import sin, cos, sqrt, atan2, radians def velocidad_año(año): df = pd.read_csv("Data/OD_{}.csv".format(año)) stations = pd.read_json("Data/stations.json") stations = pd.json_normalize(stations["stations"]) stations.n = 
stations.n.astype(int) start = pd.DataFrame({'n': df["start_station_code"]}) result_start = pd.merge(start,stations,on='n',how="left") end = pd.DataFrame({'n': df["end_station_code"]}) result_end = pd.merge(end,stations,on='n',how="left") combine = pd.concat([result_start[["n","id","la","lo"]],result_end[["n","id","la","lo"]],df["duration_sec"]],axis=1) combine.columns = ["start_station_code","id","start_station_la","start_station_lo", "end_station_code","end_station_id","end_station_la","end_station_lo","duration_sec"] R = 6373.0 lat1 = combine["start_station_la"].map(radians) lon1 = combine["start_station_lo"].map(radians) lat2 = combine["end_station_la"].map(radians) lon2 = combine["end_station_lo"].map(radians) dlon = lon2 - lon1 dlat = lat2 - lat1 a = (dlat/2).map(sin)**2 + lat1.map(cos) * lat2.map(cos) * (dlon/2).map(sin)**2 c = 2 * a.map(sqrt).map(asin) distance = R * c combine["speed"] = distance / (combine["duration_sec"]/3600) print("La velocidad media de los ciclistas en el año {} es: {} km/h".format(año,combine["speed"].mean())) interact(velocidad_año, año=[2014,2015,2016,2017]) from datetime import datetime import pytz datetime_picker = ipydatetime.DatetimePicker() def total_bikes_at_date(date): if date!=None: df = pd.read_csv("Data/OD_{}.csv".format(date.year)) df.start_date = pd.to_datetime(df.start_date) df.end_date = pd.to_datetime(df.end_date) date = pd.to_datetime(date).tz_localize(tz=None) total = len(df[(date>df.start_date) & (date<df.end_date)]) print("En el momento {} hay {} bicicletas".format(str(date),total)) else: return 0 interact(total_bikes_at_date,date=datetime_picker)
0.284278
0.845209
```
import pandas as pd
import os
from datetime import datetime, timedelta
from sqlalchemy import create_engine
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from fbprophet import Prophet
from fbprophet.plot import plot_plotly, add_changepoints_to_plot

# Create a POSTGRES database with the name 'COVID19_db'
# Replace username:password if it's not set to postgres:postgres
DATABASE_URI = os.environ.get('DATABASE_URL', '') or "postgresql://postgres:postgres@localhost:5432/COVID19_db"
print(DATABASE_URI)
engine = create_engine(DATABASE_URI)

# daily US confirmed, recovered, hospitalized, death, new_cases, new_hospitalizations, new_deaths
daily_df = pd.read_sql("select distinct date, sum(positive) as confirmed, sum(recovered) as recovered,sum(hospitalized) as hospitalized, sum(death) as death,sum(pos_inc) as new_cases,sum(hospital_inc) as new_hospitalizations,sum(death_inc) as new_deaths from covid_data_states group by date order by date", con=engine)

daily_df['confirmed'] = daily_df['confirmed'].astype(int)
daily_df['recovered'] = daily_df['recovered'].astype(int)
daily_df['hospitalized'] = daily_df['hospitalized'].astype(int)
daily_df['death'] = daily_df['death'].astype(int)
daily_df['new_cases'] = daily_df['new_cases'].astype(int)
daily_df['new_hospitalizations'] = daily_df['new_hospitalizations'].astype(int)
daily_df['new_deaths'] = daily_df['new_deaths'].astype(int)
daily_df['date'] = pd.to_datetime(daily_df['date'])

# Create confirmed, hospitalized, death, new cases, new hospitalizations, new deaths DataFrames
confirmed_df = daily_df[['date', 'confirmed']]
hospitalized_df = daily_df[['date', 'hospitalized']]
death_df = daily_df[['date', 'death']]
new_case_df = daily_df[['date', 'new_cases']]
new_hos_df = daily_df[['date', 'new_hospitalizations']]
new_death_df = daily_df[['date', 'new_deaths']]

# Rename columns to the ds/y schema expected by Prophet
confirmed_df.columns = ['ds', 'y']
# recovered_df.columns = ['ds','y']
hospitalized_df.columns = ['ds', 'y']
death_df.columns = ['ds', 'y']
new_case_df.columns = ['ds', 'y']
new_hos_df.columns = ['ds', 'y']
new_death_df.columns = ['ds', 'y']
```
```
# Fit a separate Prophet model per series and plot a 7-day forecast
m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(confirmed_df)
future = m.make_future_dataframe(periods=7)
# predicting the future with date, and upper and lower limit of y value
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(hospitalized_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(death_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(new_case_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

# The new-hospitalizations series is fitted three times below (additive,
# multiplicative, additive again) to compare the two seasonality modes.
m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(new_hos_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='multiplicative')
m.fit(new_hos_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(new_hos_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m.fit(new_death_df)
future = m.make_future_dataframe(periods=7)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)

confirmed_df.columns
```
```
def forecast(df, i):
    y_label = ['Confirmed', 'New Cases', 'Hospitalized', 'New Hospitalizations', 'Deaths', 'New Deaths']
    m = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
    m.fit(df)
    future = m.make_future_dataframe(periods=10)
    forecast = m.predict(future)
    m.plot(forecast)
    plt.ylabel(y_label[i])
    plt.xlabel('Date')
    plt.savefig("../static/images/prediction" + str(i) + ".jpg")

forecast(confirmed_df, 0)
forecast(new_case_df, 1)
forecast(hospitalized_df, 2)
forecast(new_hos_df, 3)
forecast(death_df, 4)
forecast(new_death_df, 5)
```
```
# Experiment: add an explicit monthly seasonality to the confirmed-cases model
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
forecast = m.fit(confirmed_df).predict(future)
# fig = m.plot_components(forecast)
forecast = m.predict(future)
confirmed_forecast_plot = m.plot(forecast)
```
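To quantify how good these short-horizon forecasts actually are, Prophet ships a rolling-origin cross-validation helper in `fbprophet.diagnostics`. A minimal sketch; the `initial`/`period`/`horizon` windows below are illustrative assumptions, not values from the original notebook:

```
# Hedged sketch: rolling-origin evaluation of the confirmed-cases model.
# Window sizes are assumptions chosen for illustration only.
from fbprophet.diagnostics import cross_validation, performance_metrics

m_cv = Prophet(interval_width=0.95, yearly_seasonality=True, seasonality_mode='additive')
m_cv.fit(confirmed_df)

# Train on the first 60 days, then forecast 7 days ahead, rolling forward 7 days at a time
df_cv = cross_validation(m_cv, initial='60 days', period='7 days', horizon='7 days')
metrics_df = performance_metrics(df_cv)
print(metrics_df[['horizon', 'mae', 'rmse']].head())
```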
# IMPORTS

## Libraries
```
import pandas as pd
import numpy as np
import datetime
import warnings
warnings.filterwarnings("ignore")

import seaborn as sns
import matplotlib.pyplot as plt
from IPython.core.display import HTML

from sklearn.preprocessing import RobustScaler, MinMaxScaler, LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from boruta import BorutaPy
```

## Helper Functions
```
def jupyter_settings():
    %matplotlib inline
    %pylab inline
    plt.style.use('bmh')
    plt.rcParams['figure.figsize'] = [25, 16]
    plt.rcParams['font.size'] = 24
    display(HTML('<style>.container { width:100% !important; }</style>'))
    pd.options.display.max_columns = None
    pd.options.display.max_rows = None
    pd.set_option('display.expand_frame_repr', False)
    sns.set()

jupyter_settings()
```

## Loading Data
```
dfRaw = pd.read_csv('../../01-Data/Results/01-FirstRoundCRISP/dfDataPreparation.csv',
                    low_memory=False, parse_dates=['Date'])
```

# FEATURE SELECTION
```
dfRaw1 = dfRaw.copy()
```

## Split DataFrame into Training and Validation Dataset
```
toDrop = ['WeekOfYear', 'Day', 'Month', 'DayOfWeek', 'PromoSince', 'CompetionSinse', 'YearWeek']
dfRaw1 = dfRaw1.drop(toDrop, axis=1)

dfRaw1[['Store', 'Date']].groupby('Store').max().reset_index()['Date'][0] - datetime.timedelta(days=6*7)

# Training Dataset
XTrain = dfRaw1[dfRaw1['Date'] < '2015-06-19']
yTrain = XTrain['Sales']

# Validation Dataset
XTest = dfRaw1[dfRaw1['Date'] >= '2015-06-19']
yTest = XTest['Sales']

print('Training Min Date: {}'.format(XTrain['Date'].min()))
print('Training Max Date: {}'.format(XTrain['Date'].max()))
print('\nTest Min Date: {}'.format(XTest['Date'].min()))
print('Test Max Date: {}'.format(XTest['Date'].max()))
```

## Boruta as Feature Selector
```
# Training and Validation dataset for Boruta
XTrainN = XTrain.drop(['Date', 'Sales'], axis=1).to_numpy()
yTrainN = yTrain.values.ravel()

# Define RandomForestRegressor
rf = RandomForestRegressor(n_jobs=-1)

# Define Boruta
boruta = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=42).fit(XTrainN, yTrainN)
```

### Best Features Boruta
```
colsSelected = boruta.support_.tolist()

# Best Features
XTrainFS = XTrain.drop(['Date', 'Sales'], axis=1)
colsSelectedBoruta = XTrainFS.iloc[:, colsSelected].columns.to_list()
colsNotSelectBoruta = list(np.setdiff1d(XTrainFS.columns, colsSelectedBoruta))
colsSelectedBoruta

# MonthSin
# WeekofYear
colsSelectedBoruta = [
    'Store', 'Promo', 'StoreType', 'Assortment', 'CompetitionDistance',
    'CompetitionOpenSinceMonth', 'CompetitionOpenSinceYear', 'Promo2',
    'Promo2SinceWeek', 'Promo2SinceYear', 'CompetionTimeMonth', 'PromoTimeWeek',
    'MonthSin', 'MonthCos', 'DaySin', 'DayCos', 'WeekOfYearSin', 'WeekOfYearCos',
    'DayOfWeekSin', 'DayOfWeekCos']

# Columns to Add
featToAdd = ['Date', 'Sales']
colsSelectedBoruta.extend(featToAdd)
colsSelectedBoruta
```
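Beyond the boolean `support_` mask used above, BorutaPy also exposes `support_weak_` (tentative features) and `ranking_`, which can be combined into a small summary table. A hedged sketch reusing the variable names from the cells above:

```
# Hedged sketch: summarize Boruta's decision per feature.
# Relies only on documented BorutaPy attributes (support_, support_weak_, ranking_).
borutaSummary = pd.DataFrame({
    'feature': XTrainFS.columns,
    'confirmed': boruta.support_,
    'tentative': boruta.support_weak_,
    'ranking': boruta.ranking_
}).sort_values('ranking')

print(borutaSummary.head(25))
```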
**Brief Honor Code**. Do the homework on your own. You may discuss ideas with your classmates, but DO NOT copy the solutions from someone else or the Internet. If stuck, discuss with TA.

**1**. (50 points)

Write separate `toolz` pipelines to generate the following variables:

- words: a list of all the words in the files `fortune?.txt` in the `data` directory
- reverse_index: a reverse index of words (key=position, value=word)
- index: an index of words (key=word, value=position)
- cat: a list containing the categorical encoding of words

Finally, use `numpy` to convert `cat` into a one-hot matrix with shape (#words, #unique words)

```
import os
import glob
import string
import re
import imageio
import numpy as np
import matplotlib.pyplot as plt

paths = glob.glob(os.path.join('data', 'fortune*.txt'))

import toolz as tz
import toolz.curried as c
from functools import partial, lru_cache
from functools import reduce

words = tz.pipe(
    sorted(paths),
    c.map(lambda x: open(x, encoding="utf-8")),
    c.map(lambda x: x.read()),
    c.map(lambda x: x.lower()),
    c.map(lambda x: x.translate(str.maketrans('', '', string.punctuation))),
    c.map(lambda x: x.split()),
    c.reduce(lambda x, y: x + y),
    list
)
words

reverse_index = tz.pipe(
    words,
    tz.unique,
    enumerate,
    dict)
print(reverse_index)

index = tz.pipe(
    words,
    tz.unique,
    enumerate,
    dict,
    lambda x: {value: key for key, value in x.items()}
)
print(index)

cat = tz.pipe(
    words,
    c.map(lambda x: index[x]),
    list
)
print(cat)

n = len(cat)
p = len(index)
m = np.zeros((n, p), dtype='int')
i = np.arange(len(cat))
m[i, cat] = 1
m
```

**2**. (50 points)

Write a simulation of diffusion-limited aggregation. In this simulation, we have $n$ random walkers. Each walker starts from row 0 and a random column number, and in each step, the walker increases the row number by 1 and randomly increments or decrements its column number by 1. If the column number of the walker exceeds the maximum or becomes negative, the walker emerges on the other side (toroidal boundary conditions). At any time, if any of the walker's 8 neighbors is non-zero, the walker stops in that position, and the number of steps taken is recorded in that (row, column).

Write a function `dla(nwalkers, width, height, seed)` that returns a matrix with shape (width, height) after running `nwalkers` random walks as described above. The argument `seed` is used to initialize a random number seed. Internally, the function should create a (width, height+1) matrix, and initialize the last row to have 1 with all other entries 0. Feel free to use loops. This function is not easily vectorized.

Plot the returned matrix for the arguments `nwalkers=10000, width=300, height=150, seed=123`. It should look like this:

![dla](figs/dla.png)

```
def dla(nwalkers, width, height, seed):
    """
    This function will show the plot for random walkers.
    nwalkers is the number of walkers needed
    width of the plot
    height of plot
    the seed is set so that it's pseudo-random
    """
    np.random.seed(seed=seed)
    height = height
    width = width
    x0 = np.zeros((height, width))
    x1 = np.vstack((x0, np.ones(width).reshape(1, -1)))
    # create position matrix with ghost columns on both sides
    x2 = np.hstack([np.zeros(height+1).reshape(-1, 1), x1, np.zeros(height+1).reshape(-1, 1)])
    for i in range(nwalkers):
        col = np.random.choice(range(1, width + 1))
        start = np.array([0, col])
        sumtotal = 0
        stepcount = 0
        while sumtotal == 0:
            lw = np.random.choice([-1, 1])
            step = np.array([1, lw])
            x = start + step
            if (x[1] == width + 1):
                x[1] = 1
            elif (x[1] == 0):
                x[1] = width
            check = [(x[0]+k, x[1]+j) for k in range(-1, 2) for j in range(-1, 2)]
            sumtotal = np.sum([x2[check[t][0], check[t][1]] for t in range(len(check))])
            start = x
        x2[start[0], start[1]] = start[0]
        x2[:, -1] = x2[:, 1] = x1[:, 0]
        x2[:, 0] = x2[:, -2] = x1[:, -1]
    # deselect the columns and rows created for operation
    x3 = x2[:-1, 1:width]
    plt.imshow(x3)
    plt.tick_params(
        axis='both',
        which='both',
        bottom='off',
        top='off',
        labelbottom='off',
        right='off',
        left='off',
        labelleft='off')
    plt.show()

dla(10000, 300, 150, 123)
```
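For question 1, the same one-hot matrix can also be built (and the index-assignment version sanity-checked) with an identity-matrix lookup. A small sketch reusing the `cat`, `index` and `m` variables defined above:

```
# Hedged sketch: equivalent one-hot construction via an identity-matrix lookup
m_alt = np.eye(len(index), dtype='int')[cat]
assert m_alt.shape == (len(cat), len(index))
assert (m_alt == m).all()   # matches the index-assignment version above
```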
**[Intro to Game AI and Reinforcement Learning Home Page](https://www.kaggle.com/learn/intro-to-game-ai-and-reinforcement-learning)**

---

# Introduction

In the tutorial, you learned a bit about reinforcement learning and used the `stable-baselines` package to train an agent to beat a random opponent. In this exercise, you will check your understanding and tinker with the code to deepen your intuition.

```
from learntools.core import binder
binder.bind(globals())
from learntools.game_ai.ex4 import *
```

### 1) Set the architecture

In the tutorial, you learned one way to design a neural network that can select moves in Connect Four. The neural network had an output layer with seven nodes: one for each column in the game board.

Say now you wanted to create a neural network that can play chess. How many nodes should you put in the output layer?

- Option A: 2 nodes (number of game players)
- Option B: 16 nodes (number of game pieces that each player starts with)
- Option C: 4672 nodes (number of possible moves)
- Option D: 64 nodes (number of squares on the game board)

Use your answer to set the value of the `best_option` variable below. Your answer should be one of `'A'`, `'B'`, `'C'`, or `'D'`.

```
# Fill in the blank
best_option = 'C'

# Check your answer
q_1.check()

# Lines below will give you solution code
#q_1.solution()
```

### 2) Decide reward

In the tutorial, you learned how to give your agent a reward that encourages it to win games of Connect Four. Consider now training an agent to win at the game [Minesweeper](https://bit.ly/2T5xEY8). The goal of the game is to clear the board without detonating any bombs.

To play this game in Google Search, click on the **[Play]** button at [this link](https://www.google.com/search?q=minesweeper).

<center>
<img src="https://i.imgur.com/WzoEfKY.png" width=50%><br/>
</center>

With each move, one of the following is true:
- The agent selected an invalid move (in other words, it tried to uncover a square that was uncovered as part of a previous move). Let's assume this ends the game, and the agent loses.
- The agent clears a square that did not contain a hidden mine. The agent wins the game, because all squares without mines are revealed.
- The agent clears a square that did not contain a hidden mine, but has not yet won or lost the game.
- The agent detonates a mine and loses the game.

How might you specify the reward for each of these four cases, so that by maximizing the cumulative reward, the agent will try to win the game?

After you have decided on your answer, run the code cell below to get credit for completing this question.

```
# Check your answer (Run this code cell to receive credit!)
q_2.solution()
```

### 3) (Optional) Amend the code

In this next part of the exercise, you will amend the code from the tutorial to experiment with creating your own agents!

There are a lot of hyperparameters involved with specifying a reinforcement learning agent, and you'll have a chance to amend them, to see how performance is affected.

First, we'll need to make sure that your Kaggle Notebook is set up to run the code. Begin by looking at the "Settings" menu to the right of your notebook. Your menu will look like one of the following:

<center>
<img src="https://i.imgur.com/kR1az0y.png" width=100%><br/>
</center>

If your "Internet" setting appears as a "Requires phone verification" link, click on this link. This will bring you to a new window; then, follow the instructions to verify your account.
After following this step, your "Internet" setting will appear "Off", as in the example to the right. Once your "Internet" setting appears as "Off", click to turn it on. You'll see a pop-up window that you'll need to "Accept" in order to complete the process and have the setting switched to "On". Once the Internet is turned "On", you're ready to proceed!

<center>
<img src="https://i.imgur.com/gOVh6Aa.png" width=100%><br/>
</center>

Begin by running the code cell below.

```
import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

!pip install 'tensorflow==1.15.0'
import tensorflow as tf
from kaggle_environments import make, evaluate
from gym import spaces

!apt-get update
!apt-get install -y cmake libopenmpi-dev python3-dev zlib1g-dev
!pip install "stable-baselines[mpi]==2.9.0"
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO1, A2C, ACER, ACKTR, TRPO
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from stable_baselines.common.policies import CnnPolicy

class ConnectFourGym:
    def __init__(self, agent2="random"):
        ks_env = make("connectx", debug=True)
        self.env = ks_env.train([None, agent2])
        self.rows = ks_env.configuration.rows
        self.columns = ks_env.configuration.columns
        # Learn about spaces here: http://gym.openai.com/docs/#spaces
        self.action_space = spaces.Discrete(self.columns)
        self.observation_space = spaces.Box(low=0, high=2,
                                            shape=(self.rows, self.columns, 1), dtype=np.int)
        # Tuple corresponding to the min and max possible rewards
        self.reward_range = (-10, 1)
        # StableBaselines throws error if these are not defined
        self.spec = None
        self.metadata = None
    def reset(self):
        self.obs = self.env.reset()
        return np.array(self.obs['board']).reshape(self.rows, self.columns, 1)
    def change_reward(self, old_reward, done):
        if old_reward == 1:  # The agent won the game
            return 1
        elif done:  # The opponent won the game
            return -1
        else:  # Reward 1/42
            return 1/(self.rows*self.columns)
    def step(self, action):
        # Check if agent's move is valid
        is_valid = (self.obs['board'][int(action)] == 0)
        if is_valid:  # Play the move
            self.obs, old_reward, done, _ = self.env.step(int(action))
            reward = self.change_reward(old_reward, done)
        else:  # End the game and penalize agent
            reward, done, _ = -10, True, {}
        return np.array(self.obs['board']).reshape(self.rows, self.columns, 1), reward, done, _

# Create ConnectFour environment
env = ConnectFourGym(agent2="random")

# Create directory for logging training information
log_dir = "log/"
os.makedirs(log_dir, exist_ok=True)

# Logging progress
monitor_env = Monitor(env, log_dir, allow_early_resets=True)

# Create a vectorized environment
vec_env = DummyVecEnv([lambda: monitor_env])

# Neural network for predicting action values
def modified_cnn(scaled_images, **kwargs):
    activ = tf.nn.relu
    layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3, stride=1,
                         init_scale=np.sqrt(2), **kwargs))
    layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3, stride=1,
                         init_scale=np.sqrt(2), **kwargs))
    layer_2 = conv_to_fc(layer_2)
    return activ(linear(layer_2, 'fc1', n_hidden=512, init_scale=np.sqrt(2)))

class CustomCnnPolicy(CnnPolicy):
    def __init__(self, *args, **kwargs):
        super(CustomCnnPolicy, self).__init__(*args, **kwargs, cnn_extractor=modified_cnn)
```

Next, run the code cell below to train an agent and view how the rewards evolved during training. The code mirrors the tutorial, except that the agent below is initialized with ACER rather than PPO; feel free to switch it back.

```
# Initialize agent (ACER here; the tutorial used PPO1)
model = ACER(CustomCnnPolicy, vec_env, verbose=0)

# Train agent
model.learn(total_timesteps=100000)

# Plot cumulative reward
with open(os.path.join(log_dir, "monitor.csv"), 'rt') as fh:
    firstline = fh.readline()
    assert firstline[0] == '#'
    df = pd.read_csv(fh, index_col=None)['r']
df.rolling(window=1000).mean().plot()
plt.show()
```

If your agent trained well, the plot (which shows average cumulative rewards) should increase over time.
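Before tweaking hyperparameters, it can also be useful to measure the trained agent's win rate against the random opponent. The sketch below follows the agent-wrapping pattern from the tutorial; the helper name `trained_agent`, the invalid-move fallback, and the number of episodes are illustrative assumptions:

```
# Hedged sketch: wrap the trained model as a Kaggle agent and estimate its win rate.
def trained_agent(obs, config):
    # Reshape the board the same way the ConnectFourGym environment does
    board = np.array(obs['board']).reshape(config.rows, config.columns, 1)
    action, _ = model.predict(board)
    # Fall back to the first non-full column if the chosen one is invalid
    if obs['board'][int(action)] != 0:
        action = [c for c in range(config.columns) if obs['board'][c] == 0][0]
    return int(action)

# Play 100 games against the built-in random agent (illustrative count)
outcomes = evaluate("connectx", [trained_agent, "random"], num_episodes=100)
win_rate = np.mean([r[0] == 1 for r in outcomes])
print("Win rate vs random:", win_rate)
```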
Once you have verified that the code runs, try making amendments to see if you can get increased performance. You might like to:
- change the algorithm when defining the model (e.g., replace `PPO1` in `model = PPO1(CustomCnnPolicy, vec_env, verbose=0)` with `A2C`, `ACER`, `ACKTR`, or `TRPO`). This will let you see how performance can be affected by changing the algorithm from Proximal Policy Optimization (PPO) to one of:
    - Advantage Actor-Critic (A2C),
    - Actor-Critic with Experience Replay (ACER),
    - Actor Critic using Kronecker-factored Trust Region (ACKTR), or
    - Trust Region Policy Optimization (TRPO).
- modify the `change_reward()` method in the `ConnectFourGym` class to change the rewards that the agent receives in different conditions. You may also need to modify `self.reward_range` in the `__init__` method (this tuple should always correspond to the minimum and maximum reward that the agent can receive).
- change `agent2` to a different agent when creating the ConnectFour environment with `env = ConnectFourGym(agent2="random")`. For instance, you might like to use the `"negamax"` agent, or a different, custom agent.

Note that the smarter you make the opponent, the harder it will be for your agent to train!

# Congratulations!

You have completed the course, and it's time to put your new skills to work!

The next step is to apply what you've learned to a **[more complex game: Halite](https://www.kaggle.com/c/halite)**. For a step-by-step tutorial in how to make your first submission to this competition, **[check out the bonus lesson](https://www.kaggle.com/alexisbcook/getting-started-with-halite)**!

You can find more games as they're released on the **[Kaggle Simulations page](https://www.kaggle.com/simulations)**.

As we did in the course, we recommend that you start simple, with an agent that follows your precise instructions. This will allow you to learn more about the mechanics of the game and to build intuition for what makes a good agent. Then, gradually increase the complexity of your agents to climb the leaderboard!

---

**[Intro to Game AI and Reinforcement Learning Home Page](https://www.kaggle.com/learn/intro-to-game-ai-and-reinforcement-learning)**

*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
<center>
<img src="../../img/ods_stickers.jpg" />

## [mlcourse.ai](https://mlcourse.ai) – Open Machine Learning Course
### Individual Project

# Predicting Wine Expert Rating
<div style="text-align: right;">Author: Maxim Klyuchnikov</div>
</center>
<hr>

<h1>Table of Contents<span class="tocSkip"></span></h1>

- Project Description
  - Dataset
  - Features
  - Target
  - Our goal and possible applications
- Data Analysis and Cleaning
  - Country
  - Province, Region 1 and Region 2
  - Price
  - Variety
  - Title
  - Description
  - Taster and their Twitter Handle
  - Winery and Designation
  - Target (Points)
- Metrics Selection
- Model Selection
- Cross-Validation Selection
- Data Preprocessing
  - Dealing with nulls
  - Train-test split
  - Categorical features encoding
  - Text vectorization with TF-IDF
  - Scaling numerical features
  - Getting features together
  - Getting preprocessing steps together
- Training a Model
- Hyperparameter Tuning
- Feature Engineering
  - Winery + Designation
  - Year (Vintage)
  - Winery + Year
- Retrain the Best Model
- Conclusions

```
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import stats
import plotly.offline as py
import warnings
import pycountry
from statsmodels.graphics.gofplots import qqplot
from wordcloud import WordCloud, STOPWORDS

warnings.filterwarnings('ignore')

import re
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix, hstack
from yellowbrick.model_selection import ValidationCurve, LearningCurve

py.init_notebook_mode(connected=True)
import plotly.graph_objs as go

RANDOM_SEED = 17
```

## Project Description

### Dataset
The data is taken from the Kaggle dataset https://www.kaggle.com/zynicide/wine-reviews/home, which in turn was scraped by the dataset's author from https://www.winemag.com/<br>
There are a lot of reviews from different experts for wines from all over the world, and some wine-specific information is also provided as part of the dataset.<br>
The dataset consists of the following fields (per info from https://github.com/zackthoutt/wine-deep-learning):

### Features
* **Points**: the number of points WineEnthusiast rated the wine on a scale of 1-100 (though they say they only post reviews for wines that score >=80)
* **Title**: the title of the wine review, which often contains the vintage if you're interested in extracting that feature
* **Variety**: the type of grapes used to make the wine (ie Pinot Noir)
* **Description**: a few sentences from a sommelier describing the wine's taste, smell, look, feel, etc.
* **Country**: the country that the wine is from
* **Province**: the province or state that the wine is from
* **Region 1**: the wine growing area in a province or state (ie Napa)
* **Region 2**: sometimes there are more specific regions specified within a wine growing area (ie Rutherford inside the Napa Valley), but this value can sometimes be blank
* **Winery**: the winery that made the wine
* **Designation**: the vineyard within the winery where the grapes that made the wine are from
* **Price**: the cost for a bottle of the wine, in US$
* **Taster Name**: name of the person who tasted and reviewed the wine
* **Taster Twitter Handle**: Twitter handle for the person who tasted and reviewed the wine

### Target
We have the wine rating (**Points**) as a target. Reviewers from the original site rate wines from 80 to 100; here are the details of the different ranges:

| Range  | Mark       | Description                                            |
|--------|------------|--------------------------------------------------------|
| 98–100 | Classic    | The pinnacle of quality                                |
| 94–97  | Superb     | A great achievement                                    |
| 90–93  | Excellent  | Highly recommended                                     |
| 87–89  | Very Good  | Often good value; well recommended                     |
| 83–86  | Good       | Suitable for everyday consumption; often good value    |
| 80–82  | Acceptable | Can be employed in casual, less-critical circumstances |

### Our goal and possible applications
Originally, the dataset author collected the data to ```create a predictive model to identify wines through blind tasting like a master sommelier would```. Here we will try to solve a simpler, yet useful in real life, task: predict the wine rating based on the wine features and the words used in its review. This can have the following practical applications:

#### Understanding the unrated wine quality
Unlike other beverages, wine comes in overwhelming variety: about 10k grape varieties exist (and their number is growing), they can be blended in different proportions, the harvest year and growing conditions come into play, the wine may be seasoned for different amounts of time in different types of barrels, and so on. So a review of a specific wine, or lists like "top 10 wines of the season", don't help much in practice - if you go to 2 different local stores there is a good chance you won't find the same wine in both of them. Finding a specific wine may require a journey to another city or even country :)

In such conditions it's worth having a model which can predict the wine quality without an exact rating given by an expert, based only on the wine features which you can get from the bottle.

#### Blind testing the expert predictions
While this is an area of purely personal taste, professionals always try to become free from biases and provide objective observations. Blind testing may help reveal the biases of a specific reviewer.<br>
Actually, the model could be used for _cross-validation_ of the expert ratings :)

## Data Analysis and Cleaning
Let's download the data from Kaggle, extract it into the ```data``` folder and check the main properties of the resulting DataFrame:

```
df = pd.read_csv('data/winemag-data-130k-v2.csv', index_col=0)
df.info(memory_usage='deep')
```

As we can see, there are many null values in the data; we will need to deal with them later.
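Before deciding how to handle them, it can help to see which columns are most affected. A quick hedged sketch:

```
# Hedged sketch: share of missing values per column, worst first
missing_share = df.isna().mean().sort_values(ascending=False)
missing_share[missing_share > 0]
```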
``` df.head() ``` Let's check the data for possible categorical features: ``` df.nunique() ``` Looks like the following features can be represented as categorical: * designation * province * region_1 * region_2 * taster_name * taster_twitter_handle * variety * winery Let's explore the data now to get acquainted to the dataset more closely: ### Country ``` plt.figure(figsize=(13, 10)) ax = sns.countplot(y=df.country, order=df.country.value_counts().index, palette='tab10') for p, label in zip(ax.patches, df.country.value_counts()): ax.annotate("{0:,d}".format(label), (p.get_width() + 50, p.get_y() + 0.7)) ax.set_title('Number of wine reviews per country', fontsize=18); ``` We see that we have a lot of the reviews for the wines from US, which can be explained by the fact that the reviewers are mostly located in the US.<br> Also it should be noted that we have countries with less number of reviews which may cause problems. Let's see how the countries are distributed on the map, along with the number if review in them.<br> For the Choropleth to display the coloring in a more understantable way, let's ```log1p```-transform the number of reviews per country: ``` countries = df.groupby('country').size().reset_index() countries.columns = ['name', 'size'] countries.name = countries.name.replace({ # making the country names compatible with pycountry 'England': 'United Kingdom', 'Czech Republic': 'United Kingdom', 'Macedonia': 'Macedonia, Republic of', 'Moldova': 'Moldova, Republic of', 'US': 'United States' }) data = pd.DataFrame(index=countries.index) data['name'] = countries.name data['size'] = countries['size'] data['code'] = countries.apply(lambda x: pycountry.countries.get(name=x['name']), axis=1) data['code'] = data.code.apply(lambda x: x.alpha_3 if x else None) data = data.dropna() choropleth_data = [dict( type='choropleth', locations=data['code'], z=np.log1p(data['size']), #showscale=False, text=data['name'], marker=dict( line=dict( color='rgb(180,180,180)', width=0.5 )), )] layout = dict( title='Number of wine reviews per country, log-transformed', geo=dict( showframe=False, showcoastlines=True, projection=dict( type='natural earth' ) )) fig = dict(data=choropleth_data, layout=layout) py.iplot(fig, validate=False) top_rated_countries = df[['country', 'points']].groupby('country').mean().reset_index().sort_values('points', ascending=False).country[:10] data = df[df.country.isin(top_rated_countries)] plt.figure(figsize=(15, 7)) ax = sns.violinplot(x='country', y='points', data=data, order=top_rated_countries, palette='tab10') ax.set_title('Top 10 countries with highest average rating', fontsize=18); ``` Here we can see that the some of the countries with low number of reviews has pretty high average rating.<br> Probably, it's because wines with the highest potential rating are the first to be reviewed by the experts.<br> The dependency between the **Country** and **Points** is clear. 
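To back the visual impression with numbers, we can aggregate the mean rating and the review count per country. A short hedged sketch:

```
# Hedged sketch: mean points and review counts per country
country_stats = (df.groupby('country')['points']
                   .agg(['mean', 'count'])
                   .sort_values('mean', ascending=False))
print(country_stats.head(10))
```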
#### Cleaning and transforming Countries with less number of reviews does not have too much predictive power and introduce unnecessary noise, so let's replace them with the name 'Other' instead: ``` vc = df.country.value_counts() df['trans_country'] = df.country.replace(vc[vc < 100].index, 'Other') top_rated_countries = df[['trans_country', 'points']].groupby('trans_country').mean().reset_index().sort_values('points', ascending=False).trans_country[:10] top_rated_countries_data = df[df.trans_country.isin(top_rated_countries)] plt.figure(figsize=(15, 7)) ax = sns.violinplot(x='trans_country', y='points', data=top_rated_countries_data, order=top_rated_countries, palette='tab10') ax.set_title('Top 10 countries with highest average rating', fontsize=18); ``` Now see better distribution of the rating among countries in the top 10 list. ### Province, Region 1 and Region 2 These features are actually parts of the wine location hierarchy, so they better be joined into one field with the **Country**. Let's take a look at them: ``` df[['trans_country', 'province', 'region_1', 'region_2']].head() df[['trans_country', 'province', 'region_1', 'region_2']].nunique() print('Countries with Region 2:', df[~df.region_2.isna()].trans_country.unique()) ``` Looks like **Region 2** is a US-specific feature, but it won't hurt if we include it as well, so we get better categorization for US wines. ``` df['location'] = df.apply(lambda x: ' / '.join([y for y in [str(x['trans_country']), str(x['province']), str(x['region_1']), str(x['region_2'])] if y != 'nan']), axis=1) df.location.head() ``` Now let's try to see if there is a dependency between the **Points** and **Location**: ``` df_top_locations = df[df.location.isin(df.location.value_counts().index[:10])] plt.figure(figsize=(12, 10)) ax = sns.violinplot(y='location', x='points', data=df_top_locations, palette='tab10'); ax.set_title('Wine rating distribution over top 10 locations with highest average rating', fontsize=18); ``` #### Cleaning and transforming Let's see if we can get something from the title: ``` df[['region_1', 'title']].head() ``` As we can see, some regions are repeated in title and even if region is NaN, it is possible to fill it with the value from the title, so let's do it: ``` def extract_region_1(row): if row.region_1 == 'nan': return row.region_1 if not row.title.endswith(')'): return None return row.title[row.title.rindex('(')+1:-1] df.region_1 = df.apply(extract_region_1, axis=1) df[['region_1', 'title']].head() ``` Great, now let's recreate the **Location**: ``` df['location'] = df.apply(lambda x: ' / '.join([y for y in [str(x['trans_country']), str(x['province']), str(x['region_1']), str(x['region_2'])] if y != 'nan']), axis=1) df.location.head() ``` Now let's replace the locations with lower amount of reviews with the name 'Other' ``` vc = df.location.value_counts() df.location = df.location.replace(vc[vc < 2].index, 'Other') ``` ### Price Price is is given in the US$, let's see how it's distributed: ``` plt.figure(figsize=(15, 5)) data = df[~df.price.isna()] plt.scatter(range(data.shape[0]), np.sort(data.price.values)[::-1]) plt.title("Distribution of wine prices", fontsize=18) plt.ylabel('Price'); ``` Wow, there are wines with more than $3000 price. 
That's not a usual weekend wine :) As we see, the price distribution is very skewed, let's try to log-transform it: ``` plt.figure(figsize=(15, 3)) series_price = df[~df.price.isna()].price.apply(np.log1p) ax = sns.distplot(series_price); ax.set_title("Distribution of wine prices", fontsize=18) ax.set_ylabel('Price (log1p)') ax.set_xlabel(''); ``` Still, it's not normal: ``` print('Shapiro-Wilk test:', stats.shapiro(series_price)) print('Kolmogorov-Smirnov test:', stats.kstest(series_price, cdf='norm')) ``` But not very skewed anymore: ``` print('Skeweness:', series_price.skew()) print('Kurtosis:', series_price.kurt()) ``` Now let's see a connection between the **Price** (not log-transformed) and **Points**: ``` plt.figure(figsize=(15, 5)) ax = sns.regplot(x='points', y='price', data=df, fit_reg=False, x_jitter=True) ax.set_title('Correlation between the wine price and points given', fontsize=18); ``` And now let's see which countries has the most expensive wines (per average): ``` plt.figure(figsize=(13, 7)) data = df[['country', 'price']].groupby('country').mean().reset_index().sort_values('price', ascending=False) ax = sns.barplot(y='country', x='price', data=data, palette='tab10') for p, label in zip(ax.patches, data.price): if np.isnan(label): continue ax.annotate('{0:.2f}'.format(label), (p.get_width() + 0.2, p.get_y() + 0.5)) ax.set_title('Top countries with the most expensive average wine prices'); ``` Insterestingly, we see, for example, Germany, Hungary and France in leaders here, which are also in leaders for average wine rating above. Let's take the countries with the top rated wines and see the prices distribution in them: ``` plt.figure(figsize=(15, 5)) sns.violinplot(x='country', y='price', data=top_rated_countries_data, order=top_rated_countries, palette='tab10'); ``` Weel, not good, the **Price** need to be transformed. #### Cleaning and transforming ``` df['trans_price'] = df.price.apply(np.log1p) top_rated_countries = df[['trans_country', 'points']].groupby('trans_country').mean().reset_index().sort_values('points', ascending=False).trans_country[:10] top_rated_countries_data = df[df.trans_country.isin(top_rated_countries)] plt.figure(figsize=(15, 5)) sns.violinplot(x='trans_country', y='trans_price', data=top_rated_countries_data, order=top_rated_countries, palette='tab10'); plt.figure(figsize=(15, 5)) ax = sns.regplot(x='points', y='trans_price', data=df, fit_reg=False, x_jitter=True) ax.set_title('Correlation between the wine price (log) and points given', fontsize=18); ``` ### Variety Let's see the top 10 varietes with their wine counts: ``` df_top_varieties = df[df.variety.isin(df.variety.value_counts().index[:10])] plt.figure(figsize=(13, 5)) ax = sns.countplot(y=df_top_varieties.variety, order=df_top_varieties.variety.value_counts().index, palette='tab10') for p, label in zip(ax.patches, df_top_varieties.variety.value_counts()): ax.annotate("{0:,d}".format(label), (p.get_width() + 50, p.get_y() + 0.5)) ax.set_title('Number of wines per variety', fontsize=18); ``` Now let's see the dependency between the **Variety** and **Points**: ``` plt.figure(figsize=(14, 10)) ax = sns.violinplot(y='variety', x='points', data=df_top_varieties, palette='tab10', order=df_top_varieties.variety.value_counts().index) ax.set_title('Wine rating distribution over top 10 varietes by wine count', fontsize=18); ``` As we see, somevarietes get higher points than the other and points distribution is also may vary. 
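To complement the violin plots, here is a quick numeric view of how the most frequent varieties differ in average rating (a hedged sketch; the top-10 cutoff mirrors the plots above):

```
# Hedged sketch: average rating for the 10 most frequent varieties
top10 = df.variety.value_counts().index[:10]
variety_stats = (df[df.variety.isin(top10)]
                 .groupby('variety')['points']
                 .agg(['mean', 'count'])
                 .sort_values('mean', ascending=False))
print(variety_stats)
```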
Variety has the same problem as other categorical features: there are some varietes where almost no samples, but they affect the points heavily: ``` top_rated_varietes = df[['variety', 'points']].groupby('variety').mean().reset_index().sort_values('points', ascending=False).variety[:10] top_rated_varietes_data = df[df.variety.isin(top_rated_varietes)] plt.figure(figsize=(15, 5)) ax = sns.violinplot(x='variety', y='points', data=top_rated_varietes_data, order=top_rated_varietes, palette='tab10'); ax.set_xticklabels(ax.get_xticklabels(), rotation=90); ``` #### Cleaning and transforming ``` vc = df.variety.value_counts() df['trans_variety'] = df.variety.replace(vc[vc < 2].index, 'Other') top_rated_varietes = df[['trans_variety', 'points']].groupby('trans_variety').mean().reset_index().sort_values('points', ascending=False).trans_variety[:10] top_rated_varietes_data = df[df.trans_variety.isin(top_rated_varietes)] plt.figure(figsize=(15, 5)) ax = sns.violinplot(x='trans_variety', y='points', data=top_rated_varietes_data, order=top_rated_varietes, palette='tab10'); ax.set_xticklabels(ax.get_xticklabels(), rotation=90); ``` ### Title ``` df.title.head(10) ``` The title itself seems to be not containing valuable information except that we already used it for filling the nulls in **Region 1** and we can extract a **Year** (vintage) from it, we will do it later. ### Description That a typical textual varible, which we can try to analyze with word clouds. Let's see what experts tell about wines that has low rating: ``` stopwords = set(STOPWORDS) stopwords.update(['wine', 'a', 'about', 'above', 'across', 'after', 'again', 'against', 'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always', 'among', 'an', 'and', 'another', 'any', 'anybody', 'anyone', 'anything', 'anywhere', 'are', 'area', 'areas', 'around', 'as', 'ask', 'asked', 'asking', 'asks', 'at', 'away', 'b', 'back', 'backed', 'backing', 'backs', 'be', 'became', 'because', 'become', 'becomes', 'been', 'before', 'began', 'behind', 'being', 'beings', 'best', 'better', 'between', 'big', 'both', 'but', 'by', 'c', 'came', 'can', 'cannot', 'case', 'cases', 'certain', 'certainly', 'clear', 'clearly', 'come', 'could', 'd', 'did', 'differ', 'different', 'differently', 'do', 'does', 'done', 'down', 'down', 'downed', 'downing', 'downs', 'during', 'e', 'each', 'early', 'either', 'end', 'ended', 'ending', 'ends', 'enough', 'even', 'evenly', 'ever', 'every', 'everybody', 'everyone', 'everything', 'everywhere', 'f', 'face', 'faces', 'fact', 'facts', 'far', 'felt', 'few', 'find', 'finds', 'first', 'for', 'four', 'from', 'full', 'fully', 'further', 'furthered', 'furthering', 'furthers', 'g', 'gave', 'general', 'generally', 'get', 'gets', 'give', 'given', 'gives', 'go', 'going', 'good', 'goods', 'got', 'great', 'greater', 'greatest', 'group', 'grouped', 'grouping', 'groups', 'h', 'had', 'has', 'have', 'having', 'he', 'her', 'here', 'herself', 'high', 'high', 'high', 'higher', 'highest', 'him', 'himself', 'his', 'how', 'however', 'i', 'if', 'important', 'in', 'interest', 'interested', 'interesting', 'interests', 'into', 'is', 'it', 'its', 'itself', 'j', 'just', 'k', 'keep', 'keeps', 'kind', 'knew', 'know', 'known', 'knows', 'l', 'large', 'largely', 'last', 'later', 'latest', 'least', 'less', 'let', 'lets', 'like', 'likely', 'long', 'longer', 'longest', 'm', 'made', 'make', 'making', 'man', 'many', 'may', 'me', 'member', 'members', 'men', 'might', 'more', 'most', 'mostly', 'mr', 'mrs', 'much', 'must', 'my', 'myself', 'n', 'necessary', 'need', 
'needed', 'needing', 'needs', 'never', 'new', 'new', 'newer', 'newest', 'next', 'no', 'nobody', 'non', 'noone', 'not', 'nothing', 'now', 'nowhere', 'number', 'numbers', 'o', 'of', 'off', 'often', 'old', 'older', 'oldest', 'on', 'once', 'one', 'only', 'open', 'opened', 'opening', 'opens', 'or', 'order', 'ordered', 'ordering', 'orders', 'other', 'others', 'our', 'out', 'over', 'p', 'part', 'parted', 'parting', 'parts', 'per', 'perhaps', 'place', 'places', 'point', 'pointed', 'pointing', 'points', 'possible', 'present', 'presented', 'presenting', 'presents', 'problem', 'problems', 'put', 'puts', 'q', 'quite', 'r', 'rather', 'really', 'right', 'right', 'room', 'rooms', 's', 'said', 'same', 'saw', 'say', 'says', 'second', 'seconds', 'see', 'seem', 'seemed', 'seeming', 'seems', 'sees', 'several', 'shall', 'she', 'should', 'show', 'showed', 'showing', 'shows', 'side', 'sides', 'since', 'small', 'smaller', 'smallest', 'so', 'some', 'somebody', 'someone', 'something', 'somewhere', 'state', 'states', 'still', 'still', 'such', 'sure', 't', 'take', 'taken', 'than', 'that', 'the', 'their', 'them', 'then', 'there', 'therefore', 'these', 'they', 'thing', 'things', 'think', 'thinks', 'this', 'those', 'though', 'thought', 'thoughts', 'three', 'through', 'thus', 'to', 'today', 'together', 'too', 'took', 'toward', 'turn', 'turned', 'turning', 'turns', 'two', 'u', 'under', 'until', 'up', 'upon', 'us', 'use', 'used', 'uses', 'v', 'very', 'w', 'want', 'wanted', 'wanting', 'wants', 'was', 'way', 'ways', 'we', 'well', 'wells', 'went', 'were', 'what', 'when', 'where', 'whether', 'which', 'while', 'who', 'whole', 'whose', 'why', 'will', 'with', 'within', 'without', 'work', 'worked', 'working', 'works', 'would', 'x', 'y', 'year', 'years', 'yet', 'you', 'young', 'younger', 'youngest', 'your', 'yours', 'z']) wordcloud = WordCloud(background_color='white', stopwords=stopwords, max_words=500, max_font_size=200, width=2000, height=800, random_state=RANDOM_SEED).generate(' '.join(df[df.points < 83].description.str.lower())) plt.figure(figsize=(15, 7)) plt.imshow(wordcloud) plt.title("Low Rated Wines Description Word Cloud", fontsize=20) plt.axis('off'); ``` ```bitter```, ```sour```, ```simple```, ```sharp```, ```tart``` - there must be definitely something wrong with these wines! ``` wordcloud = WordCloud(background_color='white', stopwords=stopwords, max_words=500, max_font_size=200, width=2000, height=800, random_state=RANDOM_SEED).generate(' '.join(df[df.points > 97].description.str.lower())) plt.figure(figsize=(15, 7)) plt.imshow(wordcloud) plt.title("High Rated Wines Description Word Cloud", fontsize=20) plt.axis('off'); ``` Oh yeah, much better: ```structured```, ```complex```, ```classic```, ```rich```, ```ripe```, ```powerful```, ```intense``` and other good words which you would expect for the pricey and high rated wines :) ### Taster and their Twitter Handle We don't need these fields per our goals, since we will not have them to perform predictions for the model. ### Winery and Designation Let's skip these features for now to see if we can use them later. 
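Before moving on to the target, the word-cloud impressions above can be checked numerically by counting how often a few characteristic descriptors appear in low- vs high-rated reviews. A hedged sketch; the descriptor list is an illustrative assumption:

```
# Hedged sketch: frequency of a few descriptors in low- vs high-rated reviews
descriptors = ['bitter', 'sour', 'simple', 'complex', 'rich', 'powerful']  # illustrative choice
low = df[df.points < 83].description.str.lower()
high = df[df.points > 97].description.str.lower()
for word in descriptors:
    print('{:10s} low: {:.1%}  high: {:.1%}'.format(
        word, low.str.contains(word).mean(), high.str.contains(word).mean()))
```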
### Target (Points) Let's see how our target is distributed: ``` plt.figure(figsize=(15, 5)) sns.distplot(df.points, kde=False); ``` Well, looks like we have binomial distribution here and while it may look like normally-distributed, the tests don't confirm it: ``` print('Shapiro-Wilk test:', stats.shapiro(df.points)) print('Kolmogorov-Smirnov test:', stats.kstest(df.points, cdf='norm')) ``` Skeweness is pretty low however: ``` print('Skeweness:', df.points.skew()) print('Kurtosis:', df.points.kurt()) ``` Here is the QQ-plot, in addition: ``` plt.rcParams['figure.figsize'] = (7, 7) qqplot(df.points, line='r'); ``` The problem here is that our **Points** has discrete values instead of continuous. Which might tell us that we need to treat this problem as a classification or _Ordered Regression_.<br> But still, simple regression should also work well in our case, even though the data is discrete. ## Metrics Selection There are two most popular metrics which we can choose from: MAE (mean absolute error) and MSE (mean squared error). MSE would be the better choice for this problem because: * our train target does not contain outliers and its variance is relatively low. So we want our model to penalize large errors in predictions, which is an immanent feature of MSE * MSE is smoothly differentiable which makes it easier for calculations In scikit-learn MSE is represented in negative form and has the following name, let's save it: ``` SCORING = 'neg_mean_squared_error' ``` ## Model Selection We have the following meaningful properties of our task: * it's a regression problem * we have relatively much data, >100k samples * since we have some categorical feature candidates withe a lot of unique values + textual data, we can expect that we will have a lot of features, 10k+, and much of them will be important for our predictions We can use both SGD and Ridge giving these properties.<br> While SGD will be much faster than Ridge in this task, it will not give us the same level of accuracy and must be tuned a lot more than Ridge, which essentially has one hyperparameter. So let's use Ridge and see how it will perform: ``` MODEL = Ridge(random_state=RANDOM_SEED) ``` ## Cross-Validation Selection Since our data does not have any heavy specifics, so we can choose simple KFold cross validation for 10 folds, with shuffle: ``` CV = KFold(n_splits=10, shuffle=True, random_state=RANDOM_SEED) ``` ## Data Preprocessing ``` df_full = df.copy(deep=True) df_full = df_full.drop(['country', 'price', 'taster_name', 'taster_twitter_handle', 'variety', 'province', 'region_1', 'region_2'], axis=1) df_full.columns = [x.replace('trans_', '') for x in df_full.columns] df_full.info() ``` ### Dealing with nulls ``` # Fill missing countries with "Other" df_full.country = df_full.country.fillna('Other') # Fill missing locations with "Other" df_full.location = df_full.location.fillna('Other') # Remove samples with missing prices since there are not so much of them and it's and important feature df_full = df_full[~df_full.price.isna()] df_full.info() ``` ### Train-test split ``` df_train, df_test, y_train, y_test = train_test_split(df_full.drop(['points'], axis=1), df_full.points, test_size=0.25, random_state=RANDOM_SEED) df_train.shape, df_test.shape, y_train.shape, y_test.shape ``` Note that we will be processing the train and test sets separately, to not introduce "looking into the future" problem. 
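The leakage concern above is exactly what scikit-learn pipelines are designed to handle: every transformer is fit on the training fold only. A hedged sketch of how the same preprocessing could be packaged with `ColumnTransformer` (an alternative to the manual steps that follow, not the approach actually used in this notebook):

```
# Hedged sketch: leakage-safe preprocessing + model in a single estimator
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

preprocess = ColumnTransformer([
    ('cat', OneHotEncoder(handle_unknown='ignore'), ['country', 'variety', 'location']),
    ('txt', TfidfVectorizer(max_features=10000), 'description'),   # single text column
    ('num', StandardScaler(), ['price']),
])

pipe = Pipeline([('prep', preprocess), ('model', Ridge(random_state=RANDOM_SEED))])
# cross_val_score(pipe, df_train, y_train, cv=CV, scoring=SCORING)  # fits preprocessing per fold
```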
### Categorical features encoding

```
categorical_features = ['country', 'variety', 'location']

for feature in categorical_features:
    # Use the categories seen in the train set for both frames so the dummy columns are aligned
    categories = pd.api.types.CategoricalDtype(df_train[feature].dropna().unique())
    df_train[feature] = df_train[feature].astype(categories)
    df_test[feature] = df_test[feature].astype(categories)

X_train_cat = pd.get_dummies(df_train[categorical_features], sparse=True)
X_test_cat = pd.get_dummies(df_test[categorical_features], sparse=True)

X_train_cat.shape, X_test_cat.shape
```

### Text vectorization with TF-IDF

```
tv = TfidfVectorizer(stop_words=stopwords, max_features=10000)

X_train_desc = tv.fit_transform(df_train.description)
X_test_desc = tv.transform(df_test.description)

X_train_desc.shape, X_test_desc.shape
```

### Scaling numerical features

Our model is sensitive to non-centered numeric features, so we need to scale them:

```
ss = StandardScaler()

df_train.price = ss.fit_transform(df_train[['price']])
df_test.price = ss.transform(df_test[['price']])
```

### Getting features together

```
X_train = csr_matrix(hstack([
    df_train[['price']],
    X_train_cat,
    X_train_desc,
]))

X_test = csr_matrix(hstack([
    df_test[['price']],
    X_test_cat,
    X_test_desc,
]))

X_train.shape, X_test.shape
```

### Getting preprocessing steps together

```
def prepare_data(df_full, categorical_features):
    df_train, df_test, y_train, y_test = train_test_split(df_full.drop(['points'], axis=1), df_full.points,
                                                          test_size=0.25, random_state=RANDOM_SEED)
    df_train.shape, df_test.shape, y_train.shape, y_test.shape

    print('processing categorical features')
    for feature in categorical_features:
        # Use the categories seen in the train set for both frames so the dummy columns are aligned
        categories = pd.api.types.CategoricalDtype(df_train[feature].dropna().unique())
        df_train[feature] = df_train[feature].astype(categories)
        df_test[feature] = df_test[feature].astype(categories)

    print('preparing dummies')
    X_train_cat = pd.get_dummies(df_train[categorical_features], sparse=True)
    X_test_cat = pd.get_dummies(df_test[categorical_features], sparse=True)

    print('extracting word vectors')
    tv = TfidfVectorizer(stop_words=stopwords, max_features=10000)
    X_train_desc = tv.fit_transform(df_train.description)
    X_test_desc = tv.transform(df_test.description)
    X_train_desc.shape, X_test_desc.shape

    print('scaling')
    ss = StandardScaler()
    df_train.price = ss.fit_transform(df_train[['price']])
    df_test.price = ss.transform(df_test[['price']])
    df_train.describe()

    print('combining features')
    X_train = csr_matrix(hstack([
        df_train[['price']],
        X_train_cat,
        X_train_desc,
    ]))

    X_test = csr_matrix(hstack([
        df_test[['price']],
        X_test_cat,
        X_test_desc,
    ]))

    return X_train, X_test, y_train, y_test

X_train, X_test, y_train, y_test = prepare_data(df_full, ['country', 'variety', 'location'])
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```

## Training a Model

Let's fit our model for the first time and see how it performs:

```
def train_and_cv(model, X_train, y_train, X_test, y_test):
    cvs = cross_val_score(model, X_train, y_train, cv=CV, scoring=SCORING, n_jobs=-1)
    print('MSE and STD on CV:\t', -cvs.mean(), cvs.std())

    model.fit(X_train, y_train)
    print('MSE on holdout:\t\t', mean_squared_error(y_test, model.predict(X_test)))

    return model

train_and_cv(MODEL, X_train, y_train, X_test, y_test);
```

And the results are pretty good: we already have a relatively low error. But we can definitely improve it further.
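To get an intuition for what an MSE value means on the 80-100 point scale, it can be converted to an RMSE. The number below is a placeholder rather than an actual result; substitute the cross-validation MSE printed by ```train_and_cv``` above:

```
import numpy as np

cv_mse = 4.0  # placeholder for illustration only; plug in the CV MSE printed above
rmse = np.sqrt(cv_mse)
print('RMSE: %.2f points' % rmse)  # an RMSE of ~2 means predictions are typically off by about two rating points
```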
Now let's see the learning curve:

```
def plot_learning_curve(model, X_train, y_train):
    plt.figure(figsize=(10, 5))
    viz = LearningCurve(model, cv=CV, train_sizes=np.linspace(.1, 1.0, 10), scoring=SCORING, n_jobs=-1)
    viz.fit(X_train, y_train)
    viz.poof()

plot_learning_curve(MODEL, X_train, y_train);
```

We see a typical picture: the training score decreases with the number of samples given to the model, while the cross-validation score increases.<br>
But there is still a gap between them, so our model would improve with a larger number of samples.<br>
It is also worth noticing that the variance of the cross-validation scores is quite low, which tells us that our model gives stable predictions.

## Hyperparameter Tuning

One of the key characteristics of our model is its simplicity, so we have only one parameter to adjust: [alpha](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html), which is the regularization strength of the ridge penalty.<br>
Let's see what the best value for it is, visually:

```
plt.figure(figsize=(10, 5))
viz = ValidationCurve(MODEL, param_name='alpha', cv=CV, param_range=np.logspace(-1, 1, 10), logx=True,
                      scoring=SCORING, n_jobs=-1)
viz.fit(X_train, y_train)
viz.poof()
```

As we can see, the best value for our model is located at the maximum of the cross-validation curve.<br>
The variance is still low, as on the learning curve, which is a good indicator.<br>
Now let's calculate the best ```alpha``` using a simple grid search:

```
params = {
    'alpha': np.logspace(-1, 1, 10)
}

gs = GridSearchCV(MODEL, param_grid=params, verbose=10, n_jobs=-1, cv=CV, scoring=SCORING)
gs.fit(X_train, y_train)

print('Best alpha:', gs.best_params_['alpha'])

MODEL = Ridge(alpha=gs.best_params_['alpha'], random_state=RANDOM_SEED)
train_and_cv(MODEL, X_train, y_train, X_test, y_test);
```

## Feature Engineering

### Winery + Designation

Let's see how adding the combination of the **Winery** and **Designation** (a designation is part of a winery, so they are expected to be processed together) affects our model.<br>
A specific winery and its designation may identify the specific winery "factory" where the wine is produced, which may affect the wine quality.

```
df_full['winery_designation'] = df_full.winery + ' / ' + df_full.designation

vc = df_full.winery_designation.value_counts()
df_full.winery_designation = df_full.winery_designation.replace(vc[vc < 2].index, 'Other')
df_full.winery_designation = df_full.winery_designation.fillna('Other')

print('Number of unique winery + designation:', len(df_full.winery_designation.unique()))

X_train, X_test, y_train, y_test = prepare_data(df_full, ['country', 'variety', 'location', 'winery_designation'])
X_train.shape, X_test.shape, y_train.shape, y_test.shape

train_and_cv(MODEL, X_train, y_train, X_test, y_test);
```

Our assumption was correct and the performance of the model improved, so let's keep this feature.

### Year (Vintage)

It turns out we have a year inside the title:

```
df_full.title.head()
```

The wine year (vintage) is when the grapes were harvested, and it is a categorical feature in our case.<br>
The year reflects the weather and other conditions related to the specific harvest and often affects the quality of the wine.
```
def extract_year(title):
    matches = re.findall(r'\d{4}', title)
    return next(filter(lambda x: 1000 < x <= 2018, map(int, matches)), 0)

df_full['year'] = df.title.apply(extract_year)
df_full.year = df_full.year.fillna(0)

print('Number of unique years:', len(df_full.year.unique()))

X_train, X_test, y_train, y_test = prepare_data(df_full, ['country', 'variety', 'location', 'winery_designation', 'year'])
X_train.shape, X_test.shape, y_train.shape, y_test.shape

train_and_cv(MODEL, X_train, y_train, X_test, y_test);
```

And again, the performance of the model improved, so we will keep this feature.

### Winery + Year

We can hypothesize that the winery and year together may define the quality of the wine - for example, if for some winery the weather was good and it had a good financial status in a specific year, we can expect better grape quality from it.

```
df_full['winery_year'] = df_full.winery + ' / ' + df_full.year.astype(str)

vc = df_full.winery_year.value_counts()
df_full.winery_year = df_full.winery_year.replace(vc[vc < 2].index, 'Other')
df_full.winery_year = df_full.winery_year.fillna('Other')

print('Number of unique winery + year:', len(df_full.winery_year.unique()))

X_train, X_test, y_train, y_test = prepare_data(df_full, ['country', 'variety', 'location', 'winery_designation', 'year', 'winery_year'])
X_train.shape, X_test.shape, y_train.shape, y_test.shape

train_and_cv(MODEL, X_train, y_train, X_test, y_test);
```

We got a minor improvement in the holdout score, but a worse CV score.<br>
Since this feature also introduces a lot of dummy columns, let's not add it to the model.

## Retrain the Best Model

```
X_train, X_test, y_train, y_test = prepare_data(df_full, ['country', 'variety', 'location', 'winery_designation', 'year'])
X_train.shape, X_test.shape, y_train.shape, y_test.shape

train_and_cv(MODEL, X_train, y_train, X_test, y_test)
```

Now our model is ready to be used or improved further.<br>
We can train it on the whole dataset (train+test) to get better results in real use, as our learning curve suggested.

## Conclusions

We've reached our goal of building a model that can predict the wine rating based on the wine features and textual description.<br>
Models of this type can be used in the wine industry to predict wine ratings and to augment the predictions of experts, helping find and resolve their biases. It is also possible to deploy this model as a web or mobile application to let wine buyers get predictions for random wines in local stores (using the wine description on the bottle in this case).

However, the following things can be tried to improve the model:
* we can approach the problem from the classification perspective and build a classifier instead of a regressor, or implement an ordered regression;
* the words in the Description can be stemmed, and we can use word2vec, GloVe or LDA to improve the features extracted from this field;
* if we need to retrain the model often, it is better to switch to SGDRegressor, which will be way faster for this task (see the sketch below);
* we can apply feature selection to remove noisy and unnecessary features, which may improve the accuracy and speed of the model;
* it is possible to gather data beyond the provided dataset to add other features - for example, the weather conditions during a specific year in a specific location may greatly affect the wine quality.

Choose the best wines and **drink responsibly**! :)
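For reference, here is a minimal sketch of the ```SGDRegressor``` swap mentioned in the list above. The hyperparameters are assumptions for illustration and were not tuned as part of this analysis:

```
from sklearn.linear_model import SGDRegressor

# A faster linear model for frequent retraining; it also supports partial_fit
# for incremental updates when new reviews arrive.
sgd_model = SGDRegressor(penalty='l2', alpha=1e-4, max_iter=50, tol=1e-3,
                         random_state=RANDOM_SEED)
# Hypothetical usage on the sparse matrices built earlier:
# sgd_model.fit(X_train, y_train)
# mean_squared_error(y_test, sgd_model.predict(X_test))
```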
## Example code of multiprocessing pool object failing with error (error will be found in terminal of) libc++abi.dylib: terminating with uncaught exception of type std::runtime_error: Couldn't close file # Import libs ``` import time import math import multiprocessing as mp from functools import partial from scipy import stats import numpy as np ``` ## Dummy dump data for code brivity ``` data = [[-1.105280555702782, -0.17439617189603107, 0.05005734630226715, 0.5235379869549062, -0.1477803698812845, -0.46271142578117913], [-0.05885403735978791, -1.0682207002148314, -0.06135907736680796, -0.13828445709405934, -1.0444747880537166, -0.599298079872161], [0.9885348141881728, -0.5460212430344279, 1.5301031858037542, -0.9712219466773333, 0.4942560425811777, -1.8365077902534435], [-0.6037399470269699, 0.6840221582910677, -1.2961388432607295, -0.7730389870130335, 1.0718275668764454, 0.7194801730310086], [-0.1931389171612762, 0.46157685475224125, -1.355327161350741, -1.1690783862414569, 1.0002596757265272, 0.2981787719848743], [0.5089944259437953, -1.556071607882815, 1.9476875009825, -1.7499790635589294, -0.7264959372645678, 1.1797569295568469], [0.3132927168428071, 1.064997428813059, -0.6318275312991293, 0.14658872655261196, -0.8168128316202363, -0.322653883378593], [0.04415364740337862, -2.1119482527087157, -0.062438281288370936, 1.2797008306909692, 0.013561624990343996, -0.2756593349187367], [0.9848408453831758, -2.335729791751807, -0.5534586982933313, 1.7572470676062295, -0.4083057184190674, 0.6071862283695229], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [1.0359138564588737, 0.6026203859330609, -0.8266938304714567, 3.906125043518653, 0.064711760397289, 1.9692599025162034], [0.5435151894496619, -1.2976321754508675, 1.8777666933579515, -1.8728360042827392, -1.0620111830183903, 1.4897480097981988], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.5034932198323815, 2.045084020929738, 0.9983477551573342, 0.22722394602292703, 0.719665130298287, -0.20624864303756138], [2.431880428057342, 0.6769843249322764, 0.8594884650197583, 0.43859209395714394, 0.24842861284538237, -0.5426885697956173], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-0.585701739967722, 1.24747772752694, -1.4527741388637423, -1.1486383189634235, 0.30884417339818787, 1.3923682027723339], [0.7210778161557181, 1.7741505259430754, 1.0168567935064885, 1.032527537592702, 1.1032674938802989, -0.020716668937283956], [-0.023401930159563454, 1.373597984925255, -0.6647609788485772, 0.8178527022851653, -0.9305239581288983, 0.7025932323901457], [0.01011757838433386, 0.5499931422214072, 1.5898209807704355, -0.3283172100802974, 1.1510199317920775, -1.4552778676312659], [-1.1298189947658828, -0.016371910540320778, 0.07734359174755166, 0.16377619812754576, -0.27028686045826195, -0.7729034619096025], [-0.0679754343478575, -1.496399992293454, -0.015377140493243444, 0.35447102286597393, -0.5699087436162079, -0.6130241192268077], [0.9924775296269923, -0.9895570677942931, 1.5813472008214047, -0.44270682942013573, 0.9970750278865798, -1.8678712905648072], [-0.07018036993830022, -0.914823467481481, -0.1051916005356062, -0.2711105615017444, -1.2611259677917053, -0.41694888805588026], [0.3581948420282566, -0.75560806594209, -1.3341201474622957, -0.9985148585501945, 0.7695815260793727, 0.42499022265807745], 
[-0.45387590648906295, 0.7217831418700824, 1.6592982052941034, 0.0040139874530455665, 1.2626963314091562, -1.0971492321370693], [1.7049033201175539, -1.250798436159996, 0.1821930982913399, -0.19370018392024885, 3.331165828969625, -0.3761226079792889], [0.5189348503442601, -1.4786856043146548, 1.9267216766806121, -1.7875130329726303, -0.827179255064121, 1.2725588275601716], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-0.8451045778449146, -0.18767942782154254, 1.9817701466968831, -0.482886585336503, -0.15520630399083815, 2.5410184097749453], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-0.3467738190483387, 0.4057847710510829, 1.6089626469520777, 0.8327520426544388, 1.5395663045859844, -0.4737023010484321], [-0.09398954003744588, -1.321465166225436, 0.00706035845871812, -0.020361051961328657, -0.7164238591103511, -0.9031256672043615], [-0.8351371201295263, -1.1637215877943656, -0.027411998488218053, 2.4153623798935837, 0.8198289895035742, 0.43183055701164563], [-1.1308477669026633, -0.017004621236351623, 0.07744559423642032, 0.16229910886871032, -0.2699849891510557, -0.7737112505719289], [2.1992841327256465, 0.9025415537094064, 0.7792164224868684, -0.16329649453848197, -0.18895043748335497, -0.28573592085079147], [0.8507857315040394, -0.8741739768773626, -1.416422914097263, -1.3156896180362718, 0.6006305703726948, 0.13181453370000182], [0.8111334330259322, 2.3643463003893124, 0.7769328979694332, 0.6423784974425581, 0.23616153637146164, 0.7586767101203021], [0.31411825866250903, 1.0710781902088449, -0.6334717065825682, 0.14372311592390297, -0.824699768260782, -0.3153594863421865], [0.7406953956784234, -0.8934923053779887, -1.345688811236116, -1.707624123787499, 0.6743188600473297, -0.3942968228133822], [-0.45612485537395925, 0.7237360244226224, 1.6585859817894615, -0.0016398772003042115, 1.2587801226302524, -1.094936695462614], [0.995997641756315, -1.8432898466941217, -1.443011132166486, -0.2018798643238507, 1.5802354580966909, 0.12012981656309042], [-0.6690299526257079, -1.4966187812400924, -0.14975587075735658, 3.236771009447906, 1.0452778459322931, 1.1090672428837416], [-0.4732810978580757, 0.5973668756957324, 1.6927548987066772, 0.05791266106959538, 1.4226848385790527, -1.2465273736637836], [1.50124289557179, 0.2760302519548516, -0.7622710474142799, -0.3249885965181798, -1.779475986411507, -0.32686742525530127], [0.11403874762298992, 1.5115804840448708, -0.6933733615435567, 0.976332453078738, -1.0441778861440767, 0.8742587533776104], [-0.8160747644257802, -1.3299126573638629, -0.06049459253998337, 2.4413449216881493, 0.9512196112219389, 0.30551317085027163], [0.4726456986044836, 1.817867892368189, 1.0597846572595242, 0.33430148296941736, 1.0143715154093722, -0.4788139596033137], [0.16897600090635947, 0.14136536947261572, 1.488176053491309, 0.5344900452245421, 1.476362289932175, -0.868729581506382], [0.7618107710434899, -0.7379610454064316, -1.3877428628265356, -1.780919555504298, 0.47258997887957577, -0.20772368041194267], [-1.1346521745174962, -0.05498063525968947, 0.08781291716697856, 0.18273993112367304, -0.2199854108166052, -0.8191968134614424], [0.6064010368685079, -2.569275634712709, -0.27250703704315216, 1.606598208556567, 0.02503603939055407, 0.09641643060278472], [-1.063719217467276, -0.2684967211025073, 0.006098109484078005, 0.8688862288291607, -0.10359377081094741, -0.07540553159061773], [-0.6754420852692551, 
0.5864698726478227, -1.2740444196044614, -0.8371327341670264, 1.1661919183611427, 0.5994323475662167], [0.3226016761531959, -2.495415499474011, -0.23575916160986232, 2.548081443127418, 0.26242833455358644, 0.8444591051119901], [-0.4452251479275592, 0.5320574855867011, -1.2419247258625525, -0.36412502232991517, 1.3674975605508222, 0.5464570196705528], [-0.40780080744242503, 0.5179768501719592, 1.6464241040222944, 0.4376032282117716, 1.4598529297568217, -0.840440393074131], [-0.43468999400386327, 0.1499893354228728, -0.0699963018502084, 0.020837376046106717, -0.7186429692331538, -0.7158042670032141], [-1.1209073425021978, 0.060381382331808335, 0.05647976993453201, 0.12476513945500924, -0.3706683069506089, -0.6809093525686053], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-0.013490494124725055, -0.649752212133158, -1.3395250646499626, 0.11294034382996132, 2.1938896289783028, 0.11867854412293145], [0.3573161914154756, -1.0963069593174937, -0.1012413729131282, -0.8638092909035395, -1.2718252438290103, -1.2859019169624208], [-1.1232702387254838, 0.005473797601041048, 0.0716992258410479, 0.16022720834652024, -0.2966500815716152, -0.7465122221925814], [-0.6360001381085819, -0.027273868380563816, -0.035051278238294056, -0.22934480262159324, -0.5862479044784267, -0.9376198930716371], [0.543137835829891, -1.2978642537341716, 1.8778041078708687, -1.8733778006228798, -1.0619004566229069, 1.489451712916857], [-0.10570885890948976, 1.2644581449576686, -0.56681124282265, 0.5435183548961574, -0.7183383510039941, 0.07080225835833197], [0.7301377079958901, -0.9712579353637675, -1.3246617854409064, -1.6709764079290994, 0.7751833006312072, -0.48758339401410183], [0.4546961449644032, -1.5557641263062716, -0.1854563667418634, 0.24959683745763983, -0.9112789992944788, -0.33666589047739437], [-0.7074603633008892, -0.982822103516796, -0.15752391817477848, 2.0641494959514057, 0.5355602692194376, 0.2985740714268665], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [2.4916426662160527, -0.585720679646081, -0.9771895688351482, 2.898204719429183, -0.8054029501060624, 1.3253553741393038], [-1.121900729003983, 0.04195231194901321, 0.06157332240114422, 0.13629041123374575, -0.34593519089616975, -0.7029385335090628], [-1.1087036194008162, 0.13915934943123626, 0.03528954015713245, 0.09048076641074626, -0.4720157416260161, -0.5863303195081632], [-1.1332641510240422, -0.03630885705389676, 0.08268023654563605, 0.17178127078462574, -0.24483432470448874, -0.7968577647901166], [0.024911694942810758, 1.4745840974553652, -0.6895314959852692, 0.8354141622260237, -1.0424670756872456, 0.8255256474982533], [-0.838532681739841, -0.5698581265860995, -0.1284427675680493, 1.4492029623426073, 0.11179679223194279, 0.21601047055267483], [-1.0328130326195135, 0.7203769000972763, -0.12208674534255967, -0.18910378815552456, -1.227533057822033, 0.1107340407777897], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.6138468543046278, 2.665314998678962, 0.8325594090191702, -0.015830849210834226, -0.07040626231229584, 0.5391251714135192], [1.1726894338735765, 1.3811945309734444, 0.9274047783295511, 0.7069350424096101, 1.181091610352484, -0.5723509499153648], [0.9271248031806837, 
-0.8625940055098188, -1.4951449472231833, -1.311082820510442, 0.5196815344750043, 0.22046406142084943], [-0.5811882809674394, 0.4891397561697808, -1.3132379837727934, -0.39022665462819955, 1.2438002895623228, 0.9856138472785212], [-0.13354306879967887, -0.9766760697386616, -0.01911029686742457, -0.5441482947769906, -1.103382397906622, -0.991366558127917], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.10071773885731712, 0.9512925766411268, -0.618348925921131, 1.2278023025278146, -0.3900108686417787, 0.2762813344485754], [-1.0956595952181118, 0.2184541146271821, 0.014015994746824727, 0.057402879873099874, -0.5736097447851491, -0.49109148466833236], [-1.0377204541438116, -0.05650773519490712, -0.0514253041105858, 0.7637475969049969, -0.38008046206035073, 0.17874956265749775], [-1.111977587975416, 0.11932768597747982, 0.040609211741069115, 0.09873162672049657, -0.446613437257762, -0.6101502063552663], [0.3137060918899141, -1.2834909432188906, -0.05196191683874026, -0.8098597529176627, -1.039054136286206, -1.5113870233469444], [-0.6449125459532982, -0.04500021828383533, -0.03073487989973701, -0.233240400171666, -0.5668358189532797, -0.9592208806054643], [-1.0855833687690823, 0.31374176094192596, -0.011958352746955144, 0.007112337074497345, -0.6987745387174548, -0.37693377835346675], [0.6356620601623326, -1.4318943333037963, 1.8487721480135433, -1.403051905211871, -0.9349013211396499, 1.831776797913828], [-0.035395908536560654, 1.6462460170323208, -0.6686888641317226, 0.3983926540419319, -1.203361121398653, 0.529746129343353], [0.540280359646029, -0.8016030917532955, 1.6471572945750286, -0.09991451019281647, 1.0930692357866083, -1.489855637048851], [-1.1271795728452494, 0.0030694969561238803, 0.07208683529874917, 0.15461426916294582, -0.29550297060423136, -0.7495818191094223], [-0.12152852735947414, -1.3027658224359306, -0.00019927504397419354, -0.08580405724737661, -0.7572263847024095, -0.8822508399322216], [0.5217046983304918, -0.5775482019319262, 1.656369627573072, -0.4964455117607055, 0.8832673971923386, -1.7209935993620316], [-1.130333380834273, -0.01668826588833618, 0.07739459299198598, 0.1630376534981281, -0.2701359248046588, -0.773307356240766], [-0.7908377122580569, 0.7883265924294549, 1.7855201470220174, -1.2888431261474211, -1.3564026505704991, 3.2103970661663674], [-0.6149608333062141, 0.3792785047669411, -1.2849141517514067, -0.373958794981907, 1.3759182664362943, 0.8528498600310882], [1.466437247454588, 2.0368252125497786, 0.9167570924155912, 2.1142636026996655, 1.0459833110236225, 0.4020029587927093], [0.19616042273538992, -2.6316986283013204, -0.1334335188114668, 2.2103799665343735, 0.4875642207576404, 0.17801399746636346], [-0.3948429719672309, 0.18117549633701593, -1.2866365197842857, 0.38317578451055273, 1.661010785630638, 1.1253676537659374], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.0888804282758843, 1.0666548942275398, 0.3133193928101371, -0.4442840500005737, 2.9939057292192395, 0.46949134460643827], [0.781336469133511, -0.6635890122085673, -1.407161535286031, -1.7982154142920446, 0.3813148802966207, -0.11802004398804641], [0.18781866223821717, 1.5928590240237996, -0.7918218887316103, 0.9254586466960173, -1.2221425034587639, 1.0458955468545128], [0.008764166609988331, 0.06756387292895968, 1.6515802223275033, 0.2184969500065516, 1.7042762088273784, -1.5332982243925164], [-0.9021876020652714, 
-0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.6589018610324254, -0.13044939240058473, -0.00780528611628606, -0.1974688361950392, -0.4573197413334419, -1.0618481390819963], [1.1766790873734991, -0.9366377669414488, 1.4255545740745001, -0.0663666868590789, 0.8110050905932961, -1.2269643458358397], [-1.0708965888588389, 0.44750121338141774, -0.04837995232157066, -0.06246158083584458, -0.8741754573405374, -0.21665752595328883], [-0.4415043387055981, 0.8129094179565197, 1.634658598333531, -0.03893004674124532, 1.1445027173198552, -0.9878354151725762], [-1.1365735052336379, -0.03834416082088873, 0.08300835815182883, 0.16702977005680417, -0.24386326508346767, -0.7994562593590889], [-0.031026096544580776, -0.7660161300161492, -0.1440391457034844, -0.3055545701513531, -1.4437063520883369, -0.23746083634185533], [0.8507857315040394, -0.8741739768773626, -1.416422914097263, -1.3156896180362718, 0.6006305703726948, 0.13181453370000182], [-0.6836303173458844, 0.4567071271330184, -1.238267149351531, -0.758228338826473, 1.3396859763108637, 0.4442587345882232], [-0.02321437337079897, 1.0193766048047053, 0.3917448256156366, -0.8196691927131138, 3.104844628629136, -0.09006767764879466], [1.4314015335106014, -0.7362560778338173, -0.7925120819301751, 0.41529444519906766, -0.8589764284756833, -0.401165413978007], [-0.03207244236089621, 1.2791748745055675, -0.6389260075555874, 0.8701615398470407, -0.8057716411507597, 0.5895392885034831], [-1.1382150305611225, -0.1106262137463072, 0.10315134877651115, 0.21647912310367823, -0.14561508274488605, -0.8857418877811561], [0.673103935437232, 1.8643072628390656, 1.0614518322382895, 0.6779723029502754, 1.060962519042061, -0.4130576714178562], [0.756161144898031, -0.8625699841570562, 1.4445434680944709, 0.28814432391549355, 0.9735836976996096, -0.912236407806197], [0.3167570226652594, -1.2637964514440334, -0.05725947428309015, -0.8184308461787286, -1.064390994955058, -1.487742265081834], [0.3805591717183749, -1.3798548045245878, -0.09343376478090831, -0.41524660870142865, -0.9778472330605715, -1.1254713097743982], [-0.9308830297177947, -0.21543673642156652, 2.056651114090816, -0.8229121415058662, -0.05656549282425744, 2.006101307307794], [-0.7082789036109999, 0.3168210104817211, -1.2008578392767026, -0.7029573716272861, 1.5180099751758873, 0.27616050301300443], [-0.4758640922003751, -0.028750328734714885, -1.3147909217713123, 0.2525090348458074, 1.7973271472275805, 0.9417142124714282], [-0.6482919158518532, -0.07046974564615627, -0.023842434774569196, -0.22108996675202225, -0.5337578878595243, -0.9897697124253453], [0.1770109747155721, -2.1368811739457403, -0.12678385089332236, 1.4172615637512647, 0.013795020446681443, -0.22762855761491155], [-0.3984241827560072, 0.2062332047614311, -1.4560603333523319, 0.09680195329024231, 1.4105347450286692, 1.2968428266623984], [-0.638760144695118, 0.7872110545725767, -1.3276320089811722, -0.9139811312246717, 0.9110121038293869, 0.8407266621912823], [-0.6169600144693008, 0.9609815717181202, -1.3747490122915436, -0.9992449614978584, 0.6846406679993559, 1.0490866489344812], [1.4290610462903912, -0.24438655592035732, -0.6950515077909444, 0.1160806860620773, -1.2130813094698345, -0.45385586531119604], [-1.0387589385040328, 0.654356648087019, -0.10401450573613111, -0.1523103511511516, -1.140242660494201, 0.03169321469660548], [-0.8504513147945858, -0.1278915931763589, -0.17983117863530731, 0.9068113199459752, -0.3932579446594069, 0.24508970961305368], [-1.1135523773052498, 
0.12393220677728654, 0.039203028664658364, 0.09241967143307311, -0.45379604776433424, -0.6047405418257998], [0.7717124730087931, 2.0906480897686137, 0.8507722942259518, 0.7671004887257522, 0.5899115695523266, 0.43023501812819903], [0.5104161931633352, -1.5373790767661923, 1.9425514746795227, -1.7608892753702872, -0.7513547525313278, 1.2021224736962979], [-1.106064197480183, 0.15860075692768086, 0.03003278370833007, 0.08131883744614639, -0.49723185177198564, -0.5630086767079833], [-1.1079970077666357, 0.127348844535853, 0.03865221522476712, 0.10039595343078411, -0.4554261528232481, -0.6003784830492362], [-0.4545542186224427, -1.6290334872188827, -0.31707402204454305, 4.332928103532185, 1.0216373339971057, 2.4543811714730785], [-0.9353075120919806, 0.1976782139029467, -0.1228644644256762, 0.4424060229513337, -0.6717119782220082, 0.06069521213701437], [0.8124486270428453, -0.5108186383288934, -1.4477092360296053, -1.850681899679355, 0.18887346982258238, 0.06577795229889631], [-1.1177535345131748, 0.08013914517626844, 0.051172012241295264, 0.11634175511982706, -0.39603535275018137, -0.657183815437262], [-1.1378561907133042, -0.09258739932082369, 0.0981207114450329, 0.20404278267009188, -0.17016200457704048, -0.8642109508876219], [-0.850973651653326, -1.280370032773034, 0.004128540204596525, 2.470333953681183, 0.9711256503793899, 0.2919007002105656], [-0.4420630938992198, -0.8951505413888003, -0.08645081443975615, 0.9026172467352201, 0.28129311716433913, -0.8260490796355104], [1.4734753202021242, -0.22904966360404344, -0.777165587032022, 0.057848073490661955, -1.3167490191861433, -0.36237822773779876], [0.421550508925608, -1.7848877812503934, -0.4430656770460417, 1.5591089395517732, -0.6563917471202995, 0.843953729220734], [-0.19202031707508851, 1.2169483103996432, -0.5598158072989085, 0.4155432840440932, -0.7006567879241681, 0.009676955695671724], [-0.9119842845853046, 0.24232664528587333, 0.020779927904989243, 0.3858777786380913, -0.5052972193786625, -0.45311609039896555], [-0.07457313976944444, 1.8716051304762642, -0.7347352702386706, 0.160821043383716, -1.5340481867089577, 0.7964727675337], [-0.6769565846697856, -1.4056697524599986, 1.9609314725164357, 1.3831098121849208, 1.0699507992155408, 3.1575851134896595], [0.4011211450886063, -2.092419935489841, -0.04662208361066397, 0.2094017267308625, -0.20391568243889066, -1.4059193865973612], [-1.130333380834273, -0.01668826588833618, 0.07739459299198598, 0.1630376534981281, -0.2701359248046588, -0.773307356240766], [0.5579663589902016, 2.363675798155619, 0.9130258223589577, 0.098210444769469, 0.3126150994070239, 0.17651032684700518], [-1.0126735911901388, -0.5041081990587802, -0.005180325226825533, 1.0055474490091472, 0.14062017981294653, -0.282855638627224], [-0.8625946571675962, -0.6099208834128611, -0.11128993426082379, 1.7624242938598582, 0.15281853319984204, 0.593474531767279], [1.5124028915266319, 1.1460041769670584, 0.8856560585879723, -0.5014159474486113, -0.19046239491169095, -0.3510442307426953], [-0.6704332077427175, 0.5539141646152417, -1.2645509315955434, -0.804037983649473, 1.2136054255810185, 0.5608669485891232], [-1.1233541287607116, 0.254875917494156, 0.001776725157088479, -0.02121501047595192, -0.6388082565557586, -0.4490896148338216], [0.5238803005532945, -2.725037242935024, -0.3767935306530367, 3.6900481820346975, 0.3648683733482454, 2.0731648197004278], [-0.009174438576032415, 1.1226750332224078, 0.3638151544463723, -0.8683196330652737, 2.9708720698207833, 0.03384850423803853], [-0.7633351955343629, -1.399320300296852, 
[-1.123376395630471, 0.058862876661334405, 0.05672457590781676, 0.12122012523380443, -0.3699438158133138, -0.6828480453581892], [-1.1225533779210468, 0.05936904521815906, 0.05664297391672188, 0.1224017966408727, -0.37018531285907885, -0.6822018144283277], [0.3051532794740477, -1.3422054216655361, -0.036128732357198406, -0.7832850346787128, -0.9632196116260138, -1.5818501957944076], [-0.6663988969872523, -1.32790412247422, 1.9399044467212259, 1.3464620963265215, 0.9690863586316638, 3.2508716846903796], [-0.43778162685676625, -1.2259962538459936, -0.06677306030797717, 1.3498584997240206, 0.6297182366105605, -0.7230053472567923], [0.7311712176911652, 2.292018227760847, 0.9458031903646367, 0.4764098657290774, 0.506208095830737, 0.10001847018610248], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.653888963519003, 0.31463541585862015, -1.1962604707334836, -0.5989623366856349, 1.5509336596498613, 0.27636895982758986], [0.5238803005532945, -2.725037242935024, -0.3767935306530367, 3.6900481820346975, 0.3648683733482454, 2.0731648197004278], [0.4848880870321703, 1.8253971496509556, 1.058570827641986, 0.3518788451495582, 1.0107792468536174, -0.4692012745216268], [-1.1198958537373158, 0.06100346348814587, 0.05637948108747618, 0.12621741361429628, -0.37096510681985423, -0.6801151347558058], [-0.6466784104570179, 0.7288868320832432, -1.311861739634765, -0.8864953443308718, 0.9866604342672948, 0.7707617337907424], [1.3077704068850529, 0.3530410404039671, -0.7980339436506143, -0.7452394067153197, -1.9915635057064112, -0.24504047126504896], [0.34435243209198807, -1.0864617484056098, -0.10495107801375632, -0.8953739372390437, -1.2924629322328438, -1.2748318432460168]]

print(f"data has {len(data)} entries")

center_points = [[0.034263148941797085, -0.1159878632167158, -1.354003405203193, -0.8650113508074581, 1.0156116253542034, 0.4817964241472219],
                 [0.9218557884644116, 0.7116445830799667, 1.1123934237356012, 0.14194169006315296, 1.011490905244407, -0.6711496861475166],
                 [-0.6937332641459568, -0.443989593037132, -0.029277708610171257, 0.3924171627311612, -0.4039357793744134, -0.473170515212907],
                 [0.7590346369367189, 0.830787011873216, -0.6790700154511047, 0.48960099396540635, -1.136947295960624, 0.22894960696987418],
                 [-0.17444734370440676, -0.6740414604313897, 1.8622281726084713, -1.0928181437635947, -0.49237147713980045, 2.1529579594640778]]

print(f"center_points has {len(center_points)} entries")

def euclidean_distance(v1, v2):
    # Euclidean distance between two equal-length vectors.
    dist = [(a - b)**2 for a, b in zip(v1, v2)]
    dist = math.sqrt(sum(dist))
    return dist

def find_all_dist_with_target(data_points, target_point):
    """
    Finds all distances between the target and the other points,
    returned as z-scores.
    """
    distances = []
    for dp in data_points:
        distances.append(euclidean_distance(dp, target_point))
    return stats.zscore(np.array(distances))

def multiprocess(func, jobs, cores):
    # Run func over jobs serially (cores == 1), on every available core
    # (cores == -1), or on a pool of the requested size (cores > 1).
    results = []
    if cores == 1:
        for j in jobs:
            results.append(func(j))
    elif cores == -1:
        with mp.Pool(mp.cpu_count()) as p:
            results = list(p.map(func, jobs))
    elif cores > 1:
        with mp.Pool(cores) as p:
            results = list(p.map(func, jobs))
    else:
        print('Error: cores must be a positive integer or -1')
    return results

mp.cpu_count()

func = partial(find_all_dist_with_target, data)
results = multiprocess(func, center_points, 3)
results

# Repeatedly recompute on 12 workers and check that the result matches the
# 3-worker run above.
for i in range(1, 1000):
    print("Start")
    test_passed = np.allclose(results, multiprocess(func, center_points, 12))
    if test_passed:
        print("Test passed")
    else:
        print("Test failed")
    print(i)
    print("----")
```
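The loop above only checks that two worker counts agree with each other. As an extra sanity check, the same distances can be recomputed in one vectorized call and compared against the parallel result. The sketch below is an addition, not part of the original notebook: it assumes the `data`, `center_points`, and `results` variables defined above and uses `scipy.spatial.distance.cdist` (not imported earlier), z-scoring each center's column of the distance matrix just as `find_all_dist_with_target` does for one center at a time.

```
import numpy as np
from scipy import stats
from scipy.spatial.distance import cdist

# Full Euclidean distance matrix: one row per data point, one column per center.
dist_matrix = cdist(np.asarray(data), np.asarray(center_points))

# z-score each column over the data points, then transpose so each row
# corresponds to one center point, matching the layout of `results`.
vectorized = stats.zscore(dist_matrix, axis=0).T

# Expected to print True if the multiprocessing path and the vectorized
# path agree (up to floating-point tolerance).
print(np.allclose(results, vectorized))
```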
github_jupyter
import time import math import multiprocessing as mp from functools import partial from scipy import stats import numpy as np data = [[-1.105280555702782, -0.17439617189603107, 0.05005734630226715, 0.5235379869549062, -0.1477803698812845, -0.46271142578117913], [-0.05885403735978791, -1.0682207002148314, -0.06135907736680796, -0.13828445709405934, -1.0444747880537166, -0.599298079872161], [0.9885348141881728, -0.5460212430344279, 1.5301031858037542, -0.9712219466773333, 0.4942560425811777, -1.8365077902534435], [-0.6037399470269699, 0.6840221582910677, -1.2961388432607295, -0.7730389870130335, 1.0718275668764454, 0.7194801730310086], [-0.1931389171612762, 0.46157685475224125, -1.355327161350741, -1.1690783862414569, 1.0002596757265272, 0.2981787719848743], [0.5089944259437953, -1.556071607882815, 1.9476875009825, -1.7499790635589294, -0.7264959372645678, 1.1797569295568469], [0.3132927168428071, 1.064997428813059, -0.6318275312991293, 0.14658872655261196, -0.8168128316202363, -0.322653883378593], [0.04415364740337862, -2.1119482527087157, -0.062438281288370936, 1.2797008306909692, 0.013561624990343996, -0.2756593349187367], [0.9848408453831758, -2.335729791751807, -0.5534586982933313, 1.7572470676062295, -0.4083057184190674, 0.6071862283695229], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [1.0359138564588737, 0.6026203859330609, -0.8266938304714567, 3.906125043518653, 0.064711760397289, 1.9692599025162034], [0.5435151894496619, -1.2976321754508675, 1.8777666933579515, -1.8728360042827392, -1.0620111830183903, 1.4897480097981988], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.5034932198323815, 2.045084020929738, 0.9983477551573342, 0.22722394602292703, 0.719665130298287, -0.20624864303756138], [2.431880428057342, 0.6769843249322764, 0.8594884650197583, 0.43859209395714394, 0.24842861284538237, -0.5426885697956173], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-0.585701739967722, 1.24747772752694, -1.4527741388637423, -1.1486383189634235, 0.30884417339818787, 1.3923682027723339], [0.7210778161557181, 1.7741505259430754, 1.0168567935064885, 1.032527537592702, 1.1032674938802989, -0.020716668937283956], [-0.023401930159563454, 1.373597984925255, -0.6647609788485772, 0.8178527022851653, -0.9305239581288983, 0.7025932323901457], [0.01011757838433386, 0.5499931422214072, 1.5898209807704355, -0.3283172100802974, 1.1510199317920775, -1.4552778676312659], [-1.1298189947658828, -0.016371910540320778, 0.07734359174755166, 0.16377619812754576, -0.27028686045826195, -0.7729034619096025], [-0.0679754343478575, -1.496399992293454, -0.015377140493243444, 0.35447102286597393, -0.5699087436162079, -0.6130241192268077], [0.9924775296269923, -0.9895570677942931, 1.5813472008214047, -0.44270682942013573, 0.9970750278865798, -1.8678712905648072], [-0.07018036993830022, -0.914823467481481, -0.1051916005356062, -0.2711105615017444, -1.2611259677917053, -0.41694888805588026], [0.3581948420282566, -0.75560806594209, -1.3341201474622957, -0.9985148585501945, 0.7695815260793727, 0.42499022265807745], [-0.45387590648906295, 0.7217831418700824, 1.6592982052941034, 0.0040139874530455665, 1.2626963314091562, -1.0971492321370693], [1.7049033201175539, -1.250798436159996, 0.1821930982913399, -0.19370018392024885, 3.331165828969625, -0.3761226079792889], [0.5189348503442601, 
-1.4786856043146548, 1.9267216766806121, -1.7875130329726303, -0.827179255064121, 1.2725588275601716], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-0.8451045778449146, -0.18767942782154254, 1.9817701466968831, -0.482886585336503, -0.15520630399083815, 2.5410184097749453], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-0.3467738190483387, 0.4057847710510829, 1.6089626469520777, 0.8327520426544388, 1.5395663045859844, -0.4737023010484321], [-0.09398954003744588, -1.321465166225436, 0.00706035845871812, -0.020361051961328657, -0.7164238591103511, -0.9031256672043615], [-0.8351371201295263, -1.1637215877943656, -0.027411998488218053, 2.4153623798935837, 0.8198289895035742, 0.43183055701164563], [-1.1308477669026633, -0.017004621236351623, 0.07744559423642032, 0.16229910886871032, -0.2699849891510557, -0.7737112505719289], [2.1992841327256465, 0.9025415537094064, 0.7792164224868684, -0.16329649453848197, -0.18895043748335497, -0.28573592085079147], [0.8507857315040394, -0.8741739768773626, -1.416422914097263, -1.3156896180362718, 0.6006305703726948, 0.13181453370000182], [0.8111334330259322, 2.3643463003893124, 0.7769328979694332, 0.6423784974425581, 0.23616153637146164, 0.7586767101203021], [0.31411825866250903, 1.0710781902088449, -0.6334717065825682, 0.14372311592390297, -0.824699768260782, -0.3153594863421865], [0.7406953956784234, -0.8934923053779887, -1.345688811236116, -1.707624123787499, 0.6743188600473297, -0.3942968228133822], [-0.45612485537395925, 0.7237360244226224, 1.6585859817894615, -0.0016398772003042115, 1.2587801226302524, -1.094936695462614], [0.995997641756315, -1.8432898466941217, -1.443011132166486, -0.2018798643238507, 1.5802354580966909, 0.12012981656309042], [-0.6690299526257079, -1.4966187812400924, -0.14975587075735658, 3.236771009447906, 1.0452778459322931, 1.1090672428837416], [-0.4732810978580757, 0.5973668756957324, 1.6927548987066772, 0.05791266106959538, 1.4226848385790527, -1.2465273736637836], [1.50124289557179, 0.2760302519548516, -0.7622710474142799, -0.3249885965181798, -1.779475986411507, -0.32686742525530127], [0.11403874762298992, 1.5115804840448708, -0.6933733615435567, 0.976332453078738, -1.0441778861440767, 0.8742587533776104], [-0.8160747644257802, -1.3299126573638629, -0.06049459253998337, 2.4413449216881493, 0.9512196112219389, 0.30551317085027163], [0.4726456986044836, 1.817867892368189, 1.0597846572595242, 0.33430148296941736, 1.0143715154093722, -0.4788139596033137], [0.16897600090635947, 0.14136536947261572, 1.488176053491309, 0.5344900452245421, 1.476362289932175, -0.868729581506382], [0.7618107710434899, -0.7379610454064316, -1.3877428628265356, -1.780919555504298, 0.47258997887957577, -0.20772368041194267], [-1.1346521745174962, -0.05498063525968947, 0.08781291716697856, 0.18273993112367304, -0.2199854108166052, -0.8191968134614424], [0.6064010368685079, -2.569275634712709, -0.27250703704315216, 1.606598208556567, 0.02503603939055407, 0.09641643060278472], [-1.063719217467276, -0.2684967211025073, 0.006098109484078005, 0.8688862288291607, -0.10359377081094741, -0.07540553159061773], [-0.6754420852692551, 0.5864698726478227, -1.2740444196044614, -0.8371327341670264, 1.1661919183611427, 0.5994323475662167], [0.3226016761531959, -2.495415499474011, -0.23575916160986232, 2.548081443127418, 0.26242833455358644, 0.8444591051119901], [-0.4452251479275592, 0.5320574855867011, 
-1.2419247258625525, -0.36412502232991517, 1.3674975605508222, 0.5464570196705528], [-0.40780080744242503, 0.5179768501719592, 1.6464241040222944, 0.4376032282117716, 1.4598529297568217, -0.840440393074131], [-0.43468999400386327, 0.1499893354228728, -0.0699963018502084, 0.020837376046106717, -0.7186429692331538, -0.7158042670032141], [-1.1209073425021978, 0.060381382331808335, 0.05647976993453201, 0.12476513945500924, -0.3706683069506089, -0.6809093525686053], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-0.013490494124725055, -0.649752212133158, -1.3395250646499626, 0.11294034382996132, 2.1938896289783028, 0.11867854412293145], [0.3573161914154756, -1.0963069593174937, -0.1012413729131282, -0.8638092909035395, -1.2718252438290103, -1.2859019169624208], [-1.1232702387254838, 0.005473797601041048, 0.0716992258410479, 0.16022720834652024, -0.2966500815716152, -0.7465122221925814], [-0.6360001381085819, -0.027273868380563816, -0.035051278238294056, -0.22934480262159324, -0.5862479044784267, -0.9376198930716371], [0.543137835829891, -1.2978642537341716, 1.8778041078708687, -1.8733778006228798, -1.0619004566229069, 1.489451712916857], [-0.10570885890948976, 1.2644581449576686, -0.56681124282265, 0.5435183548961574, -0.7183383510039941, 0.07080225835833197], [0.7301377079958901, -0.9712579353637675, -1.3246617854409064, -1.6709764079290994, 0.7751833006312072, -0.48758339401410183], [0.4546961449644032, -1.5557641263062716, -0.1854563667418634, 0.24959683745763983, -0.9112789992944788, -0.33666589047739437], [-0.7074603633008892, -0.982822103516796, -0.15752391817477848, 2.0641494959514057, 0.5355602692194376, 0.2985740714268665], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [2.4916426662160527, -0.585720679646081, -0.9771895688351482, 2.898204719429183, -0.8054029501060624, 1.3253553741393038], [-1.121900729003983, 0.04195231194901321, 0.06157332240114422, 0.13629041123374575, -0.34593519089616975, -0.7029385335090628], [-1.1087036194008162, 0.13915934943123626, 0.03528954015713245, 0.09048076641074626, -0.4720157416260161, -0.5863303195081632], [-1.1332641510240422, -0.03630885705389676, 0.08268023654563605, 0.17178127078462574, -0.24483432470448874, -0.7968577647901166], [0.024911694942810758, 1.4745840974553652, -0.6895314959852692, 0.8354141622260237, -1.0424670756872456, 0.8255256474982533], [-0.838532681739841, -0.5698581265860995, -0.1284427675680493, 1.4492029623426073, 0.11179679223194279, 0.21601047055267483], [-1.0328130326195135, 0.7203769000972763, -0.12208674534255967, -0.18910378815552456, -1.227533057822033, 0.1107340407777897], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.6138468543046278, 2.665314998678962, 0.8325594090191702, -0.015830849210834226, -0.07040626231229584, 0.5391251714135192], [1.1726894338735765, 1.3811945309734444, 0.9274047783295511, 0.7069350424096101, 1.181091610352484, -0.5723509499153648], [0.9271248031806837, -0.8625940055098188, -1.4951449472231833, -1.311082820510442, 0.5196815344750043, 0.22046406142084943], [-0.5811882809674394, 0.4891397561697808, -1.3132379837727934, -0.39022665462819955, 1.2438002895623228, 0.9856138472785212], [-0.13354306879967887, 
-0.9766760697386616, -0.01911029686742457, -0.5441482947769906, -1.103382397906622, -0.991366558127917], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.10071773885731712, 0.9512925766411268, -0.618348925921131, 1.2278023025278146, -0.3900108686417787, 0.2762813344485754], [-1.0956595952181118, 0.2184541146271821, 0.014015994746824727, 0.057402879873099874, -0.5736097447851491, -0.49109148466833236], [-1.0377204541438116, -0.05650773519490712, -0.0514253041105858, 0.7637475969049969, -0.38008046206035073, 0.17874956265749775], [-1.111977587975416, 0.11932768597747982, 0.040609211741069115, 0.09873162672049657, -0.446613437257762, -0.6101502063552663], [0.3137060918899141, -1.2834909432188906, -0.05196191683874026, -0.8098597529176627, -1.039054136286206, -1.5113870233469444], [-0.6449125459532982, -0.04500021828383533, -0.03073487989973701, -0.233240400171666, -0.5668358189532797, -0.9592208806054643], [-1.0855833687690823, 0.31374176094192596, -0.011958352746955144, 0.007112337074497345, -0.6987745387174548, -0.37693377835346675], [0.6356620601623326, -1.4318943333037963, 1.8487721480135433, -1.403051905211871, -0.9349013211396499, 1.831776797913828], [-0.035395908536560654, 1.6462460170323208, -0.6686888641317226, 0.3983926540419319, -1.203361121398653, 0.529746129343353], [0.540280359646029, -0.8016030917532955, 1.6471572945750286, -0.09991451019281647, 1.0930692357866083, -1.489855637048851], [-1.1271795728452494, 0.0030694969561238803, 0.07208683529874917, 0.15461426916294582, -0.29550297060423136, -0.7495818191094223], [-0.12152852735947414, -1.3027658224359306, -0.00019927504397419354, -0.08580405724737661, -0.7572263847024095, -0.8822508399322216], [0.5217046983304918, -0.5775482019319262, 1.656369627573072, -0.4964455117607055, 0.8832673971923386, -1.7209935993620316], [-1.130333380834273, -0.01668826588833618, 0.07739459299198598, 0.1630376534981281, -0.2701359248046588, -0.773307356240766], [-0.7908377122580569, 0.7883265924294549, 1.7855201470220174, -1.2888431261474211, -1.3564026505704991, 3.2103970661663674], [-0.6149608333062141, 0.3792785047669411, -1.2849141517514067, -0.373958794981907, 1.3759182664362943, 0.8528498600310882], [1.466437247454588, 2.0368252125497786, 0.9167570924155912, 2.1142636026996655, 1.0459833110236225, 0.4020029587927093], [0.19616042273538992, -2.6316986283013204, -0.1334335188114668, 2.2103799665343735, 0.4875642207576404, 0.17801399746636346], [-0.3948429719672309, 0.18117549633701593, -1.2866365197842857, 0.38317578451055273, 1.661010785630638, 1.1253676537659374], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.0888804282758843, 1.0666548942275398, 0.3133193928101371, -0.4442840500005737, 2.9939057292192395, 0.46949134460643827], [0.781336469133511, -0.6635890122085673, -1.407161535286031, -1.7982154142920446, 0.3813148802966207, -0.11802004398804641], [0.18781866223821717, 1.5928590240237996, -0.7918218887316103, 0.9254586466960173, -1.2221425034587639, 1.0458955468545128], [0.008764166609988331, 0.06756387292895968, 1.6515802223275033, 0.2184969500065516, 1.7042762088273784, -1.5332982243925164], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.6589018610324254, -0.13044939240058473, -0.00780528611628606, -0.1974688361950392, -0.4573197413334419, -1.0618481390819963], [1.1766790873734991, 
-0.9366377669414488, 1.4255545740745001, -0.0663666868590789, 0.8110050905932961, -1.2269643458358397], [-1.0708965888588389, 0.44750121338141774, -0.04837995232157066, -0.06246158083584458, -0.8741754573405374, -0.21665752595328883], [-0.4415043387055981, 0.8129094179565197, 1.634658598333531, -0.03893004674124532, 1.1445027173198552, -0.9878354151725762], [-1.1365735052336379, -0.03834416082088873, 0.08300835815182883, 0.16702977005680417, -0.24386326508346767, -0.7994562593590889], [-0.031026096544580776, -0.7660161300161492, -0.1440391457034844, -0.3055545701513531, -1.4437063520883369, -0.23746083634185533], [0.8507857315040394, -0.8741739768773626, -1.416422914097263, -1.3156896180362718, 0.6006305703726948, 0.13181453370000182], [-0.6836303173458844, 0.4567071271330184, -1.238267149351531, -0.758228338826473, 1.3396859763108637, 0.4442587345882232], [-0.02321437337079897, 1.0193766048047053, 0.3917448256156366, -0.8196691927131138, 3.104844628629136, -0.09006767764879466], [1.4314015335106014, -0.7362560778338173, -0.7925120819301751, 0.41529444519906766, -0.8589764284756833, -0.401165413978007], [-0.03207244236089621, 1.2791748745055675, -0.6389260075555874, 0.8701615398470407, -0.8057716411507597, 0.5895392885034831], [-1.1382150305611225, -0.1106262137463072, 0.10315134877651115, 0.21647912310367823, -0.14561508274488605, -0.8857418877811561], [0.673103935437232, 1.8643072628390656, 1.0614518322382895, 0.6779723029502754, 1.060962519042061, -0.4130576714178562], [0.756161144898031, -0.8625699841570562, 1.4445434680944709, 0.28814432391549355, 0.9735836976996096, -0.912236407806197], [0.3167570226652594, -1.2637964514440334, -0.05725947428309015, -0.8184308461787286, -1.064390994955058, -1.487742265081834], [0.3805591717183749, -1.3798548045245878, -0.09343376478090831, -0.41524660870142865, -0.9778472330605715, -1.1254713097743982], [-0.9308830297177947, -0.21543673642156652, 2.056651114090816, -0.8229121415058662, -0.05656549282425744, 2.006101307307794], [-0.7082789036109999, 0.3168210104817211, -1.2008578392767026, -0.7029573716272861, 1.5180099751758873, 0.27616050301300443], [-0.4758640922003751, -0.028750328734714885, -1.3147909217713123, 0.2525090348458074, 1.7973271472275805, 0.9417142124714282], [-0.6482919158518532, -0.07046974564615627, -0.023842434774569196, -0.22108996675202225, -0.5337578878595243, -0.9897697124253453], [0.1770109747155721, -2.1368811739457403, -0.12678385089332236, 1.4172615637512647, 0.013795020446681443, -0.22762855761491155], [-0.3984241827560072, 0.2062332047614311, -1.4560603333523319, 0.09680195329024231, 1.4105347450286692, 1.2968428266623984], [-0.638760144695118, 0.7872110545725767, -1.3276320089811722, -0.9139811312246717, 0.9110121038293869, 0.8407266621912823], [-0.6169600144693008, 0.9609815717181202, -1.3747490122915436, -0.9992449614978584, 0.6846406679993559, 1.0490866489344812], [1.4290610462903912, -0.24438655592035732, -0.6950515077909444, 0.1160806860620773, -1.2130813094698345, -0.45385586531119604], [-1.0387589385040328, 0.654356648087019, -0.10401450573613111, -0.1523103511511516, -1.140242660494201, 0.03169321469660548], [-0.8504513147945858, -0.1278915931763589, -0.17983117863530731, 0.9068113199459752, -0.3932579446594069, 0.24508970961305368], [-1.1135523773052498, 0.12393220677728654, 0.039203028664658364, 0.09241967143307311, -0.45379604776433424, -0.6047405418257998], [0.7717124730087931, 2.0906480897686137, 0.8507722942259518, 0.7671004887257522, 0.5899115695523266, 0.43023501812819903], [0.5104161931633352, 
-1.5373790767661923, 1.9425514746795227, -1.7608892753702872, -0.7513547525313278, 1.2021224736962979], [-1.106064197480183, 0.15860075692768086, 0.03003278370833007, 0.08131883744614639, -0.49723185177198564, -0.5630086767079833], [-1.1079970077666357, 0.127348844535853, 0.03865221522476712, 0.10039595343078411, -0.4554261528232481, -0.6003784830492362], [-0.4545542186224427, -1.6290334872188827, -0.31707402204454305, 4.332928103532185, 1.0216373339971057, 2.4543811714730785], [-0.9353075120919806, 0.1976782139029467, -0.1228644644256762, 0.4424060229513337, -0.6717119782220082, 0.06069521213701437], [0.8124486270428453, -0.5108186383288934, -1.4477092360296053, -1.850681899679355, 0.18887346982258238, 0.06577795229889631], [-1.1177535345131748, 0.08013914517626844, 0.051172012241295264, 0.11634175511982706, -0.39603535275018137, -0.657183815437262], [-1.1378561907133042, -0.09258739932082369, 0.0981207114450329, 0.20404278267009188, -0.17016200457704048, -0.8642109508876219], [-0.850973651653326, -1.280370032773034, 0.004128540204596525, 2.470333953681183, 0.9711256503793899, 0.2919007002105656], [-0.4420630938992198, -0.8951505413888003, -0.08645081443975615, 0.9026172467352201, 0.28129311716433913, -0.8260490796355104], [1.4734753202021242, -0.22904966360404344, -0.777165587032022, 0.057848073490661955, -1.3167490191861433, -0.36237822773779876], [0.421550508925608, -1.7848877812503934, -0.4430656770460417, 1.5591089395517732, -0.6563917471202995, 0.843953729220734], [-0.19202031707508851, 1.2169483103996432, -0.5598158072989085, 0.4155432840440932, -0.7006567879241681, 0.009676955695671724], [-0.9119842845853046, 0.24232664528587333, 0.020779927904989243, 0.3858777786380913, -0.5052972193786625, -0.45311609039896555], [-0.07457313976944444, 1.8716051304762642, -0.7347352702386706, 0.160821043383716, -1.5340481867089577, 0.7964727675337], [-0.6769565846697856, -1.4056697524599986, 1.9609314725164357, 1.3831098121849208, 1.0699507992155408, 3.1575851134896595], [0.4011211450886063, -2.092419935489841, -0.04662208361066397, 0.2094017267308625, -0.20391568243889066, -1.4059193865973612], [-1.130333380834273, -0.01668826588833618, 0.07739459299198598, 0.1630376534981281, -0.2701359248046588, -0.773307356240766], [0.5579663589902016, 2.363675798155619, 0.9130258223589577, 0.098210444769469, 0.3126150994070239, 0.17651032684700518], [-1.0126735911901388, -0.5041081990587802, -0.005180325226825533, 1.0055474490091472, 0.14062017981294653, -0.282855638627224], [-0.8625946571675962, -0.6099208834128611, -0.11128993426082379, 1.7624242938598582, 0.15281853319984204, 0.593474531767279], [1.5124028915266319, 1.1460041769670584, 0.8856560585879723, -0.5014159474486113, -0.19046239491169095, -0.3510442307426953], [-0.6704332077427175, 0.5539141646152417, -1.2645509315955434, -0.804037983649473, 1.2136054255810185, 0.5608669485891232], [-1.1233541287607116, 0.254875917494156, 0.001776725157088479, -0.02121501047595192, -0.6388082565557586, -0.4490896148338216], [0.5238803005532945, -2.725037242935024, -0.3767935306530367, 3.6900481820346975, 0.3648683733482454, 2.0731648197004278], [-0.009174438576032415, 1.1226750332224078, 0.3638151544463723, -0.8683196330652737, 2.9708720698207833, 0.03384850423803853], [-0.7633351955343629, -1.399320300296852, -0.11055720646458086, 2.7897910754461375, 0.9676845682964734, 0.7228453537383205], [-0.022187046661679297, -0.7185452018800622, -1.313192429013542, 0.4759752762247739, 2.282775591359465, 0.46267530927185263], [0.5293469484940335, -2.08456507041098, 
-0.12049855573745025, 0.3144095056137451, -0.2512065205979125, -1.3190269134351011], [-0.0929495603031353, 1.2778783504274123, -0.5696386469769665, 0.5577869796699387, -0.7297269948081327, 0.08746700387772778], [0.6000665442297305, -1.4537861233864628, 1.8523014341284012, -1.4541591935675748, -0.9244565739103122, 1.803827310197326], [0.3213328469968353, 1.372850313334714, -0.7175407018272698, -0.06204386131189286, -1.2346797113037289, 0.0448952101131718], [-0.71357874092201, -0.21393883164714472, 1.8443150668854937, -0.1481722054079054, -0.26164701741633434, 3.0847738210562574], [-1.0908622126103489, 0.2748589459490369, -0.001444839849350109, 0.025436195003697162, -0.648342318425516, -0.4235770639538266], [0.780016758173194, -0.6733097159567892, -1.4045331570616302, -1.7936344498097445, 0.3939229353696055, -0.1296808653881364], [-0.6994668488696845, 0.34005868215435076, -1.2067266106587171, -0.7032567650388737, 1.4909826371866803, 0.304328877787144], [0.3344586371101335, -1.4972979547401042, -0.06388761653491024, -0.41667900506161215, -0.8421118719380858, -1.2679151901202235], [-0.0955954419131582, -0.5371223484559329, -1.2960459155291066, 0.00719237341590126, 2.101284066502345, 0.17785826738353488], [1.9592220295895604, 0.8567429553505964, 0.847852016793643, -0.7806963780665446, -0.15044958020506405, -0.8501531714144609], [2.4850464558764758, 1.4295867830546716, 0.7257858437999921, -0.20705056777237862, -0.6469243674330578, -0.13978899987431853], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.9722552543741863, 0.13931853901200078, -0.10921098851272783, 0.4152603468079529, -0.6119871931060774, -0.010814393413479489], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [0.29216477990921913, -0.6587380360795673, -1.2927299015112226, -1.3919463313593603, 0.7081331258079735, 0.03972074001539269], [-1.1053761629281234, 0.12338766397177044, 0.039954682170496254, 0.10820981307659135, -0.44855048506184325, -0.6049667880668483], [-0.4545542186224427, -1.6290334872188827, -0.31707402204454305, 4.332928103532185, 1.0216373339971057, 2.4543811714730785], [-1.1019021804175857, 0.23243295105837491, 0.009639886985919601, 0.035488353083419015, -0.5962196188132224, -0.47474396606321584], [-0.41127196609509076, 1.054229322322488, 1.5692228358016334, -0.15741748469805125, 0.8301113215867363, -0.6984823387090822], [-0.6710006576288748, -0.24479907287843364, 0.023364657133970863, -0.13713070171223904, -0.3071198392909685, -1.1988431381151108], [-0.09092752526724143, -1.7291322828593645, 0.0404999539395207, 0.15101875518030605, -0.262597114762658, -1.31824577157866], [-1.0434965749912037, 0.04178315101188948, -0.00601910380284552, 0.48273056781284374, -0.4103258443088517, -0.20170706779533942], [0.05157662067756119, 0.534281805984465, 1.5972627674831144, -0.23883717840080185, 1.1953826122042326, -1.4718688954849517], [0.5104849151420722, -1.5373368116916972, 1.9425446609132668, -1.760790605807797, -0.751374917534649, 1.202176433978941], [0.9837240210950108, 1.8832802207386565, 0.927260604951353, -0.081794056320366, 0.6032242354653098, -0.5516277885165237], [-1.1260158175511046, 0.039421469164889825, 0.06198133235661909, 0.1303820541984043, -0.34472770566734473, -0.7061696881583693], [0.8208557323120673, -0.5947387511944692, -1.4235675060518365, -1.7738533804208125, 0.30861472310109683, -0.033866699793143766], [-1.1304716436829463, 0.001044822728825202, 
0.07241324326312906, 0.14988758353467266, -0.29453698242117127, -0.7521667428288681], [-0.6704332077427175, 0.5539141646152417, -1.2645509315955434, -0.804037983649473, 1.2136054255810185, 0.5608669485891232], [-0.8769965140851074, -0.20729345939849844, 1.9849322238518141, -0.5286763523603993, -0.1458482934674437, 2.5159769612428193], [2.302163741179645, 0.6450878929934177, 0.8589269883345174, 0.21754386136649864, 0.22081103146849465, -0.5874403596714015], [0.3306742727641722, -1.1661465164745888, -0.08361465826931032, -0.8632065285205435, -1.1906828555999485, -1.3705685990173058], [-0.5559578170552512, 0.6472018589254609, -1.3557000453186874, -0.4576137293096577, 1.0408639231657442, 1.1754181443292673], [2.1274225208553497, 0.7463713771766364, 0.8911156834328959, -0.3837790117634791, 0.09349509724582494, -0.9730727956606142], [-0.6807209291105217, 0.5475870576549334, -1.263530906706856, -0.8188088762378265, 1.2166241386530812, 0.5527890619658568], [-0.6762795014701415, -0.28368188787132287, 0.03387817003157576, -0.11880684378303932, -0.25668761899902975, -1.2454864237154706], [-0.6783514735046178, 0.49558994212590773, -1.2487806622491362, -0.7765521967556729, 1.2892537560189257, 0.49090202018858337], [-1.0506363371468832, 0.566870314353018, -0.08035910171652057, -0.11108167081045212, -1.0267701648373397, -0.07325417790420444], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.5891503498474331, 1.221901440887384, -0.7289875353214017, 0.7506252987529559, -0.98802526551099, 0.376115790058108], [-1.121829957733991, 0.006359592575484169, 0.07155642235663186, 0.16229513330888976, -0.297072701401704, -0.745381318065324], [-0.24301796733858047, 0.16362863534526367, -1.2754557897300176, -1.0464203443608118, 1.381520040988129, -0.05972375664109117], [-1.1344307765638968, -0.02811728851584178, 0.08029837793633254, 0.16363047698157251, -0.25671281720221717, -0.7871492070291378], [-0.7021062707903175, 0.3206172746579061, -1.201469854209915, -0.6940948360742737, 1.5161987473326493, 0.28100723498696417], [0.11583986200814116, -1.705924639129003, -0.17100944286931746, 1.119544372362371, -0.5039504274490183, 0.21225817999462823], [1.5328156504913015, 0.05105971351550404, -0.7702745096240144, 0.0966791344936653, -1.5612670546038585, -0.09614877420180823], [-1.1098352687512745, 0.13846336766560233, 0.035401742894887886, 0.08885596822602734, -0.4716836831880892, -0.5872188870367223], [-0.5918324277707904, 1.1189804034041972, -1.4172008735885506, -1.0667797451051997, 0.48173448873349756, 1.2388101671189948], [0.6712782388577491, -2.7431926258475507, -0.2189988932378529, 1.8551661830734647, 0.2992987389608758, -0.10763231704001869], [-0.6965574606343218, 0.43093861267626565, -1.231990368014042, -0.7638373024502269, 1.3679207995288962, 0.41285920516477714], [-0.13414052459957138, -0.9714704737776256, -0.020613382308260545, -0.5490570152685206, -1.1108517861550404, -0.9851894943692403], [-0.5547940617611061, 0.6835538311342271, -1.3658055482608171, -0.48184594427419875, 0.9916391881026299, 1.2188302752803206], [0.9725521867173683, -1.61332089375613, -1.435813489618814, -0.6118785901908964, 1.3596417429621124, -0.10420730405802238], [-0.7097545702374882, 0.33373157519404234, -1.20570658577003, -0.7180276576272272, 1.494001350258743, 0.29625099116387754], [-0.6654983811774808, 0.6104035260860863, -1.2800253937242319, -0.8358073293938955, 1.138832521934009, 0.628489289868916], [0.5767380973942804, -0.5330633291555068, 
1.5745469072327567, -0.22646616184362403, 0.7447648121754781, -1.1677190622100153], [0.6000665442297305, -1.4537861233864628, 1.8523014341284012, -1.4541591935675748, -0.9244565739103122, 1.803827310197326], [-0.6833603510311552, 0.5281456501584886, -1.2582741502580537, -0.8096469472732265, 1.24184024879905, 0.5294674191656767], [-1.1309860297513368, 0.0007284673808098313, 0.07246424450756347, 0.149149038905255, -0.294386046767568, -0.7525706371600313], [-0.4521830058649073, 0.752887506127596, 1.650702560758071, -0.015407585746752574, 1.2209610288492594, -1.059967802111871], [0.3730057904531019, 1.1763199158209923, 1.2551288294884475, 0.4003654817025063, -0.7183645880316897, 3.991324267573703], [-0.14195843725807106, -1.0353060180363172, -0.0032907399183962796, -0.5173762374130599, -1.027588203253072, -1.061721810010093], [0.9664306470188228, -1.3319957765264654, -1.5151274758228224, -0.827892648789757, 0.9703719320652995, 0.23097291361550437], [0.5330964609039989, 0.8542150682780624, -0.7844706973910772, 0.9803359887186543, -0.7293282332978923, 0.5029910712064833], [-0.5283998425547727, 0.8779679060986733, -1.4183731127488415, -0.5734652339201979, 0.7394780866429378, 1.4520467032821192], [-1.1138095703394448, 0.1237740291032788, 0.039228529286875514, 0.09205039911836428, -0.45372057993753273, -0.6049424889913817], [0.005527954570022319, -1.379655275080202, -0.1237883671390108, 0.27729707655634206, -0.7966754761639199, -0.39910609892610927], [-0.10053358325130633, 1.873457180623051, -0.7371563574969224, 0.11059603302349304, -1.5508722745145924, 0.797337885521199], [-1.0477466466956895, 0.0035330467150310567, 0.0043924066058903975, 0.502531515000879, -0.36019549532411943, -0.24754256473337272], [0.15281035866690063, -1.199717205739974, -0.244509822338853, 0.2931019235305761, -1.0695327197509152, -0.10600784982534309], [1.2247116278978705, -1.019312277905176, 1.371181682969119, -0.04654468115398206, 0.8437732149235718, -1.2521839098473921], [2.129658287014112, 0.6820469586557099, 0.9093119185911591, -0.3328136317679047, 0.18296087189816437, -1.049667802758349], [1.5068755957630142, 0.1369494700800967, -0.7228690605344473, -0.21328884407166085, -1.5855957592311163, -0.4924380624362477], [3.9296737089827967, 1.9082379601481443, 0.6974377731261053, 2.1650004321404213, -0.508662073655915, 0.5057967281369494], [0.9817008195521815, -0.543938957214335, -1.5804770802704466, -1.4399486128380161, 0.11260131645302139, 0.6033038101716482], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.31243512415925484, -1.6860716851242985, -0.14876587173175426, 2.5163699625001517, 1.0064999199016744, 0.22694074913227233], [0.21745280306968923, 1.3845143304835594, -0.8046281963146819, 1.3313914015414448, -1.027806354480017, 1.2963411811608323], [-0.26785646643131855, 1.378552339782796, -0.6106750513054114, 0.1552915909563759, -0.9640591184779622, 0.19847458797815046], [0.07624691711046554, -1.1278562204897966, 1.9475686122041855, -1.5452066772781199, -0.9493738666562115, 1.8453052924224747], [-0.6648843975867217, 0.6642355026336011, -1.2950714453996701, -0.8737804500254259, 1.065327477777265, 0.6927189187669361], [-0.8113823228438265, -0.9887489203263643, -0.07472280652743978, 2.332905019212184, 0.592883998189851, 0.641725342213265], [-0.6284652655547358, -0.2848461967937978, -0.03567633223075733, 0.1707614007710765, -0.33654393933491805, -0.7470248889947184], [1.9570143420723853, 0.5973049075252522, -0.9733977854536018, 
0.3676074197736812, -2.1842374012678687, 0.6481242238885432], [1.7222605854784716, 0.3585051653274861, -0.7691997183237939, 0.031198589829881725, -1.7710040439543955, -0.21707255770457248], [-0.17113624269844618, 1.2297923375290694, -0.5618864578229436, 0.4455281959984512, -0.7067847754604555, 0.026075065540902578], [-0.6856511669887707, -0.12908250481551375, -0.010148157866073024, -0.24882647859893708, -0.47391235422071065, -1.061602433440769], [0.931197946710191, -0.7353621027791383, -1.530514207520638, -1.3958955728863367, 0.34739496175410783, 0.372406519749536], [0.6043978530179509, 1.7382933100915892, 0.9372923134906661, 0.7081954700361622, 0.981189277656893, 0.0013716245593667904], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [1.5840177552811288, 0.58145722302878, -0.9152128216691133, -0.19244950977200062, -2.260656804929137, 0.5390319133494], [-0.08927287605908289, -0.6465411391770278, -0.10841566864555714, -0.700762525630942, -1.5318802190417367, -0.5953697328727268], [-0.7283826608768637, 0.4882111548245352, -1.2503774645602044, -0.8653883778084206, 1.2718480172071371, 0.4795130495841843], [-0.6690383189078858, -1.3473455299706643, 1.945161203170028, 1.3556240252911216, 0.9943024687776333, 3.2275500418902], [0.5283775181476834, -1.4083391462007064, -0.2290715072310236, -0.04823913948143781, -1.072271707987475, -0.5835260637999101], [-0.80535453282154, 0.6813988511990097, 1.8144323074904305, -1.2384525168421214, -1.2177140447676686, 3.082128030765378], [-1.1121487756589759, 0.11922240291766027, 0.04062618495521687, 0.09848583906782639, -0.4465632058722429, -0.6102846223886773], [-1.137049225975723, -0.1099092259855651, 0.10303575955612508, 0.2181529606517904, -0.14595716331021222, -0.8848265016690076], [-0.6683612357082412, -0.22535766538198904, 0.018107900685168408, -0.14629263067683906, -0.3323359494369377, -1.175521495314931], [-0.19171168543405434, 1.2171381236084522, -0.5598464080455691, 0.4159864108217438, -0.7007473493163301, 0.009919292294369626], [-0.43834930031651975, 0.8148498151191074, 1.6343457771006686, -0.03440010940224896, 1.143576938394916, -0.9853580889029533], [-0.6333607161879486, -0.00783246088411889, -0.04030803468709654, -0.23850673158619312, -0.6114640146243956, -0.914298250271457], [-1.1278141194992155, 0.0026792409988120607, 0.07214975043388337, 0.15370320050809613, -0.2953167763819465, -0.7500800631563456], [-0.17202354179620968, 0.6171081147237982, -1.3973812129411602, -1.2423738179582566, 0.7985307945587733, 0.48475191438631376], [0.5263214466658721, -1.4206883667130294, 1.9110041222204521, -1.8157621795953967, -0.9026715784104643, 1.3421062907800207], [1.4648374806950006, 0.039822892795774216, -0.6987207585798547, -0.2218400595336235, -1.4754940566467272, -0.610442966704163], [2.179152995757842, 0.3556169227682374, 0.9310641730898758, 0.19634629785666566, 0.5502054538620816, -0.9390181356386397], [0.002165077379343181, 1.266620948742711, 0.3242926997925511, -0.9516001307052322, 2.7796564056662705, 0.206099488240829], [-0.14017413899054848, -1.0520267695398293, 0.0015274058282708023, -0.5018628246354682, -1.00367013972809, -1.081569961562268], [-1.122535275657949, 0.04156205599170135, 0.06163623753627862, 0.13537934257889608, -0.3457489966738849, -0.7034367775559857], [0.4726456986044836, 1.817867892368189, 1.0597846572595242, 0.33430148296941736, 1.0143715154093722, -0.4788139596033137], [-0.4693219649771261, 0.6265289869403992, 1.6848697640334733, 0.044169767622695394, 
1.3848606733600988, -1.2115449094635131], [1.4604694084468115, -0.7550677362634985, -0.7068639759908996, 2.0132107159900925, -0.32232232409735073, 0.252552803379889], [0.29354281592690085, 1.0584239762882286, -0.6314316568051939, 0.11418133074719582, -0.8186623421166566, -0.33151525958871936], [2.9802333394656997, 1.2761951118924186, 0.6506127121782477, 0.9047808413889957, -0.3788858226910514, 0.27117457201976825], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-0.3212849507256502, -0.5136093307182082, 1.886502968621098, -0.39183910162595736, -0.1302890112449384, 2.4995404275497948], [-1.1682243014114788, -0.0756280099454028, 0.09114158478672953, 0.13453778192224686, -0.21013438407428128, -0.8455575424368804], [0.9771742287553464, -0.9035315476254587, 0.5833784964247414, -2.761024110069694, 0.6494047212562665, 2.6120752657873574], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.9459209164956984, 0.14983635950770408, -0.7112163743776874, 2.337592834035763, 0.24919872623854214, 0.25260703312337723], [2.2600378719791308, 0.7622324098379241, 0.8963848167688612, -0.14561738331007074, 0.14470375514772918, -0.9472941606579734], [2.2472083218971206, 0.36183575086265335, 0.9343066221729538, 0.3199618048412785, 0.5791193213881742, -0.9280796604646369], [0.05729999392243513, 1.0026871351125726, 0.3289382307698401, -0.45724768147922484, 3.0642764059113143, 0.3915715439609781], [2.042465275020441, 0.6584851187761593, 0.9095292890933837, -0.4798557169310785, 0.16730725028088966, -1.0822793131581732], [2.134563812507949, 0.608218402266541, 0.9303680938630517, -0.2699132556278039, 0.2869326603413288, -1.1374588915471253], [3.107958677957422, 0.08730619525327857, 0.6920382854283762, 2.4748192090461174, 0.8918723716593553, 0.4229789486166851], [0.4768091785382379, -1.01587079190507, -1.406921618500067, -0.5180192275337997, 0.977025881766805, 0.6890144478199904], [-1.111977587975416, 0.11932768597747982, 0.040609211741069115, 0.09873162672049657, -0.446613437257762, -0.6101502063552663], [-0.4557263958499525, 0.41126960194666123, -1.4366689720100831, -0.029415009175675785, 1.204819098626982, 1.4675065158580272], [0.3277435025744026, -1.1857671076401495, -0.0783290147156601, -0.8544629112340457, -1.165381255499779, -1.3941190075666563], [0.8375886219008728, -0.9713810143595858, -1.3901391318532512, -1.2698799732132724, 0.7267111211025413, 0.015206319699102416], [-0.6029719931003963, 1.0230387873140359, -1.3911210963222762, -1.0180159217645295, 0.6072112968489307, 1.1238175304427478], [2.0333575587193766, -0.15222638998720564, -0.9121365661793633, 1.1241515039954078, -1.3289071586876773, 0.3264786968508675], [2.513939608573007, 0.6203021661672845, 0.7269379912811417, 0.702283295635309, 0.15650283785854713, -0.037774222322689524], [-1.1304716436829463, 0.001044822728825202, 0.07241324326312906, 0.14988758353467266, -0.29453698242117127, -0.7521667428288681], [-1.114617009896049, 0.0998862784810351, 0.045865968189871605, 0.10789355568509645, -0.42139732711179284, -0.6334718491554459], [0.9816275307769113, -0.8707294940283549, 0.5745091588105755, -2.7764823573701842, 0.6068594376048742, 2.6514241543513095], [0.9453333593959775, -1.2561468333872572, -1.4568678265312702, -0.7920391273029603, 0.9617865573935931, 0.2494465927622221], [-0.4545542186224427, -1.6290334872188827, -0.31707402204454305, 4.332928103532185, 1.0216373339971057, 2.4543811714730785], 
[2.382176147065756, 0.9743212673207472, 0.8458766940638777, -0.06981536777346047, -0.07902344879290069, -0.6880443377226025], [-1.0317007159322076, 0.7566972367712405, -0.13218714816024635, -0.21340985758300743, -1.2767426993197863, 0.1541057822957266], [0.7905744458557272, -0.5955440859710109, -1.4255601828568398, -1.830282165668144, 0.29305849478572865, -0.03639429418741654], [0.5189514798156084, -1.4854087944208507, -0.20815668417356978, -0.009966625438319153, -0.9717393258415251, -0.6759240674720705], [2.1105125268010583, 0.13548726636211209, 0.9066772912141688, 0.09640698033551694, 0.7073306045197818, -1.1341995052633607], [0.6571495684500872, -1.6096130864343499, 1.826783460012604, -1.0347190231234447, -0.7870579718937988, 2.118324075777144], [-0.13605962227486895, 1.5576091003607562, -0.651215495504859, 0.27328915715830754, -1.1371610533023089, 0.4188315308843781], [0.4590547815850739, 1.4745834531612798, -0.7387939439090719, 1.5221186072967514, -0.910664928429009, 0.918882467462564], [-1.0516887973648612, -0.43927905157020186, 0.05485588101285496, 1.0156748022926605, 0.13729243732459065, -0.27845105305451695], [1.7216398067183976, 0.04803565912956487, -0.7555932892817897, 0.45439884086541327, -1.4532267915789199, -0.08998269223770257], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.0023658776601791064, 1.1578099921196754, -0.6023800903640794, 1.0232197915409496, -0.6203438105721447, 0.4465867338676828], [2.3907096268184698, 1.068660100675724, 0.8200553095024051, -0.12232095362461226, -0.20373555651291952, -0.5750979912857619], [-0.599736265771775, 0.28011892952557116, -0.11856776698369399, -0.3845027837564371, -0.9879548132321383, -0.5691587825102528], [-0.10857371340533413, 1.5656042961013965, -0.651443186968782, 0.31922862088799764, -1.1330053948311, 0.4297887920294343], [-0.09823486383379133, -0.5565637559523778, -1.290789159080304, 0.01635430238050117, 2.126500176648314, 0.15453662458335465], [2.4942820881366865, -0.5662792721496358, -0.9824463252839506, 2.8890427904645835, -0.8306190602520318, 1.348677016939484], [-0.6757120515839843, 0.5150313496223524, -1.2540374186979386, -0.785714125720273, 1.2640376458729567, 0.5142236629887633], [-0.6836303173458844, 0.4567071271330184, -1.238267149351531, -0.758228338826473, 1.3396859763108637, 0.4442587345882232], [-0.6545966762189177, 0.6705626095939095, -1.2960914702883575, -0.859009557437072, 1.062308764705202, 0.7007968053902026], [0.7589013828081274, -0.8288409759283462, -1.362479105471211, -1.720339018092945, 0.5956518165373598, -0.3162540077895756], [0.8011321335382606, -0.5177784559852324, -1.4465872086520493, -1.8669298815265438, 0.1921940542018514, 0.056892277013303155], [0.3929711733573905, -1.295375702446993, -0.116206968348426, -0.45328284592970175, -1.0869004815922014, -1.0240825528749453], [-0.5524191376682995, -0.9454695964817291, 0.0006390510454626902, 0.8621211557795119, 0.39666533807779036, -0.9626578043575006], [-1.071870642697471, 0.39344778809029174, -0.03329819977502727, -0.02500544144490601, -0.8005647582262716, -0.2811698808831237], [-1.119704921881748, 0.025484644723913395, 0.06635066715226368, 0.15239465971487204, -0.32213787589407006, -0.7224635695963074], [-0.15467588850995975, 1.2399157086655619, -0.5635184976448432, 0.4691616241398169, -0.7116147163757561, 0.0389996841381289], [-0.36588764762733894, 0.032360496208625356, 1.6311781191782273, 0.9374884841171164, 1.934219893826334, -0.8491334615017907], [-1.0539481057687492, 
-0.08937155803418248, 0.02998257192559524, 0.5583853667839591, -0.23616766948327578, -0.3586578158304511], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [-1.1053761629281234, 0.12338766397177044, 0.039954682170496254, 0.10820981307659135, -0.44855048506184325, -0.6049667880668483], [1.4568132468586228, -0.3517788699784971, -0.7439808730557685, 0.11568525790697023, -1.157565421669782, -0.509602481575285], [0.32911668679808537, -0.38655833112934246, -1.366324491794456, -1.5202133368637587, 0.3551075837644036, 0.36622373921791185], [0.5435151894496619, -1.2976321754508675, 1.8777666933579515, -1.8728360042827392, -1.0620111830183903, 1.4897480097981988], [0.5435151894496619, -1.2976321754508675, 1.8777666933579515, -1.8728360042827392, -1.0620111830183903, 1.4897480097981988], [-0.5700811198532956, -0.7860910630164756, -0.35706238619353603, 2.2477985058722596, 0.126457764258407, 1.1085618669924848], [0.06997199346793187, 0.9926624874472938, 0.3326768637763119, -0.4261019376575264, 3.084999705017871, 0.3802723813799378], [0.6962699559401875, -0.28667470087404334, 1.433163942138222, -0.3112118301582603, 0.3654869354802266, -0.7968107539754957], [-1.096982176424138, 0.2532769516257806, 0.004157011419792859, 0.029600835587805338, -0.6221049172730065, -0.44963161735638996], [-0.8451045778449146, -0.18767942782154254, 1.9817701466968831, -0.482886585336503, -0.15520630399083815, 2.5410184097749453], [-1.1093562683178801, 0.15657608270038226, 0.03035919167270987, 0.07659215181787317, -0.4962658635889252, -0.5655936004274283], [2.222230734873711, 1.1126534135240218, 0.7219956298761805, -0.2728173548757582, -0.46454153693853867, -0.033977374006066714], [0.9885520975600711, -0.5460106134947343, 1.5301014721619417, -0.9711971315777849, 0.49425097114321714, -1.8364942194039169], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [2.0775356808953727, 0.5731452237113246, 0.9360224182290093, -0.3517930399306764, 0.30366635288795585, -1.182237202023611], [1.069903540007858, 1.4780762137438517, 0.973783597793792, 0.5736955717195719, 1.0987095084773815, -0.5330222008795734], [-1.118941843167555, -0.2540705635095816, 0.0713920932058911, 0.5557296199372516, -0.04600524393780613, -0.5584349338184023], [-1.1350978386071495, -0.05525472553321006, 0.08785710464515654, 0.18210005605674556, -0.2198546401663235, -0.8195467475099624], [0.9000531181022443, 1.968248257935443, 1.0489399969155928, 1.0297242459620104, 1.0432523631701425, -0.277356237347502], [0.03095653780021233, -2.209155290190939, -0.036154499044358926, 1.325510475513969, 0.1396421757201906, -0.39226754891963656], [2.1670744800500654, 0.514124591773941, 0.8857439056545676, 0.05838931873292162, 0.3261302873202788, -0.7506133714784967], [0.3146994783916986, -1.2650618728360954, -0.05705546930535277, -0.8213850246963994, -1.0637872523406453, -1.489357842406487], [1.4268050473733354, 1.7505109208936107, 0.8396654903641546, 2.3157459855823257, 1.2020952362017678, 0.6267692065170664], [-0.4917969470288688, 0.5503430987191533, 1.7045808572280574, 0.05723110692321234, 1.4770011766065523, -1.3035643144245677], [-1.136229487957608, -0.055950707298843994, 0.08796930738291217, 0.18047525787202662, -0.21952258172839662, -0.8204353150385217], [2.776155160835092, 1.3235331581652399, 0.776843426945551, 0.4181420306234261, -0.34127798007034504, -0.25119818723383636], [1.1528484865306552, -1.5624166428689799, 
1.5181685313986666, 0.2129192841086417, 1.5492255070857368, -1.9035875786618395], [-1.1012997397073063, 0.1971672165725547, 0.01957027205515969, 0.06225643488752884, -0.5475131364103208, -0.5167692854387865], [1.4032932384706485, -0.028598563114777525, -0.7574323962754423, -0.08928640319370686, -1.5232614570265883, -0.19784936678873308], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-0.4531434136894591, 0.47373326309736724, -1.2261544565161457, -0.33663923543611585, 1.44314589098873, 0.4764920912700132], [-0.6954241718320412, -1.691032856204539, -0.09718830626933254, 3.328390299093905, 1.2974389473919858, 0.875850814881942], [0.7905744458557272, -0.5955440859710109, -1.4255601828568398, -1.830282165668144, 0.29305849478572865, -0.03639429418741654], [-0.9022731959070516, -0.03190510630356553, 2.0072966074197542, -0.9024496496069685, -0.2925797903145921, 2.2264544361784213], [1.3935864804832232, -0.3710869766907656, 0.29505936846235475, -1.753965901014829, 2.4905687780954326, -0.9770300848583517], [0.9086807938098286, 0.7519928446438902, -0.8827506765064429, 1.8297837014925749, -0.5972862587126273, 0.9687873333680659], [-0.664128447375653, -0.1871181906248237, 0.007698103918245275, -0.16611839296442282, -0.3824612269837088, -1.1296995692264247], [-1.0614752514295118, -0.2314804026629088, -0.004114496071364068, 0.8462049575863967, -0.15313547074662742, -0.031145222544121637], [2.2810483658145775, 0.48955663302186037, 0.9009810399249368, 0.29083929308329787, 0.4225399126362488, -0.774013502072841], [0.5602225403932772, -1.7092136583075443, -0.2234610317005946, 0.09970892510855334, -0.7490988646738037, -0.8697999559390598], [-1.1282256283539276, 0.0024261567203997195, 0.07219055142943091, 0.15311236480456203, -0.29519602785906407, -0.7504031786212761], [-0.12579599072133882, -0.9184571303091471, -0.03486359299968414, -0.5718798693234606, -1.1789804969590107, -0.9215360457607883], [-0.5747063059000032, 0.897877640751959, -1.353963164197556, -0.8738202056236325, 0.7944503552707836, 0.9760182438329881], [-0.6991968825549549, 0.41149720517982097, -1.2267336115652394, -0.754675373485627, 1.3931369096748658, 0.3895375623645972], [0.7680187894991684, -0.7519611409170113, -1.3833633277820043, -1.7590546589137144, 0.4952099957835714, -0.22409834071611334], [-0.6160871833639646, 0.16315412919888786, -0.0869762270464452, -0.33026975459825564, -0.8365072167027198, -0.7094925336424958], [-1.1166218851627163, 0.08083512694190235, 0.05105980950353952, 0.11796655330454607, -0.3963674111881084, -0.656295247908703], [-0.11888910543884068, -1.2833244149394858, -0.005456031492776742, -0.09496598621197631, -0.7824424948483786, -0.8589291971320417], [-1.0540542626737368, -0.035982478973889016, 0.015007921992363822, 0.5193782836712435, -0.3094614037249745, -0.2949936389960592], [-0.1515034057756671, -1.1302669325476309, 0.022630933490131837, -0.46632292572119466, -0.9025792956638081, -1.175462374259733], [-0.6017637237030475, 0.4764855422491642, -1.3111979339954187, -0.41976843980490686, 1.2498377157064484, 0.969458074031988], [-1.0518708637110195, 0.5661110615177809, -0.08023669872987815, -0.11285417792105457, -1.026407919268692, -0.0742235242989964], [-0.1254298338797898, -0.5554709586408276, -1.2930878433519135, -0.035643215090324434, 2.110038334411327, 0.15443239617606186], [-1.130950644116341, -0.017067892305954693, 0.0774557944853071, 0.16215139994282685, -0.26995480202033506, -0.7737920294381618], [0.11583986200814116, 
-1.705924639129003, -0.17100944286931746, 1.119544372362371, -0.5039504274490183, 0.21225817999462823], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-0.9058570265227396, -0.03410921728425852, 2.0076519432899778, -0.9075952377490469, -0.2915281914288085, 2.2236404235943397], [1.7639521834206169, -0.12820126757997685, 1.205840334817659, -0.8437763113381999, -1.3569942588854103, 2.7081397801464124], [-0.7283826608768637, 0.4882111548245352, -1.2503774645602044, -0.8653883778084206, 1.2718480172071371, 0.4795130495841843], [-0.6028900737607985, 0.2603611666811115, -0.11326000929045724, -0.37607939942125507, -0.9625877674325654, -0.5928843196415956], [-0.13208298032601073, -0.9702050523855638, -0.02081738728599822, -0.5461028367508495, -1.1114555287694527, -0.9835739170445871], [0.9914092323585031, -1.01399199368549, -1.5213769387610616, -0.8813030252544641, 0.6549670373410741, 0.5406153476369545], [0.9444214826743407, -1.4524404856813182, -1.4829749221967956, -0.7817836105551699, 1.1234798207843526, 0.0861963248404648], [-0.6704332077427175, 0.5539141646152417, -1.2645509315955434, -0.804037983649473, 1.2136054255810185, 0.5608669485891232], [0.08719144028155898, -1.7924356446672716, -0.08315691162566065, 0.6003858772598303, -0.2925877783143674, -0.8162561051992764], [-0.45387590648906295, 0.7217831418700824, 1.6592982052941034, 0.0040139874530455665, 1.2626963314091562, -1.0971492321370693], [-0.9215578193081433, -0.1740653437190488, 2.045736405003993, -0.8354263119622146, -0.10818503334169956, 2.055921787274817], [-1.1094237598965624, 0.1387164519440146, 0.035360941899340505, 0.08944680392956156, -0.47180443171097175, -0.5868957715717918], [0.019091693803680133, -1.389131489114161, -0.12013814988989074, 0.30972316134759903, -0.7762138391064494, -0.4097050702946443], [-0.9618180923982454, -0.4728312846797491, -0.010222634659570821, 1.0785646977590075, 0.12569771510955718, -0.2429240601246983], [-1.1138095703394448, 0.1237740291032788, 0.039228529286875514, 0.09205039911836428, -0.45372057993753273, -0.6049424889913817], [0.8481463095834059, -0.8936153843738076, -1.4111661576484609, -1.3065276890716717, 0.6258466805186642, 0.10849289089982227], [0.799200424317542, -0.8931470572169663, -1.3414994526483726, -1.5977207318105062, 0.7060350592924796, -0.39085721134948975], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.6340487507400081, 0.027380632071791203, -0.050229933149262926, -0.26539770721663797, -0.660145381334538, -0.8723401389125917], [0.2764626743460684, 1.0423463858951556, -0.6281758421976289, 0.0937089310863059, -0.8060058388222519, -0.351572717489887], [0.3929711733573905, -1.295375702446993, -0.116206968348426, -0.45328284592970175, -1.0869004815922014, -1.0240825528749453], [0.8850982164722727, -0.6214356794235828, -1.4847607479316944, -1.434794694576071, 0.2728211384750948, 0.434995890102341], [-1.1441831393545034, -0.09647857010141318, 0.09874802675157536, 0.1949586837282544, -0.1683054960377218, -0.8691788511609307], [0.013762290539277598, 1.5389995317548928, -0.7084062692653486, 0.7675998630061976, -1.1369620316890705, 0.9017678779238807], [1.829048030731632, -0.7717218759940329, -0.8268215432412838, 1.3884662172162858, -0.6993007961501637, 0.07449483999145656], [0.20868403776317193, -1.9035842839884047, -0.1898649282789512, 1.307318416176066, -0.28879830130494943, 0.052231155987247764], [1.1349802649994243, 
-1.9255972163307782, -1.6375914897645234, 0.3863105923259991, 1.4470190166081978, 1.094806049686002], [1.3056622983170585, 1.32141464774845, -1.1705724321642308, 3.6301212670213654, -0.986995077879966, 2.982203383860036], [-0.6780815071898885, 0.5670284651513781, -1.2687876631556583, -0.8279708052024262, 1.1914080285071114, 0.5761107047660364], [0.9947235700659594, -0.8159542786253702, -1.5766511974761699, -1.0190114753359474, 0.38513660841556924, 0.7769587018807675], [-1.1264915382931895, -0.03214359599978656, 0.08200873376091533, 0.18150524479339075, -0.2468216038940892, -0.7915399304682879], [-1.095201868453644, -0.11474325694501873, 0.034072871729231, 0.49915408750466106, -0.2240626300643039, -0.39105014118974957], [0.7694590704906612, -0.7510753459425681, -1.3835061312664203, -1.756986733951345, 0.49478737595348254, -0.22296743658885618], [-0.6435237035686446, -0.04414605884419382, -0.030872583259709988, -0.2312463296722385, -0.5672433452180082, -0.9581303659113232], [0.642071731531649, 0.20877815316330875, -0.6766163798651625, 1.5239602434046557, 0.10892819277075966, -0.1902022845728414], [0.8572941329324578, -1.2797215126795873, -1.3833250269541706, -1.1393615618775383, 1.0534460458412467, -0.2805994786555021], [0.29229920586986885, 1.1589948308738234, -0.6597163808904433, 0.038737357298706435, -0.9573024996980672, -0.21164286068880742], [1.1275811548633181, -1.573157508011086, 1.5270130815814997, 0.5025577865661038, 1.5493623773404004, -1.491217789641543], [0.3560634002023989, 1.5189369948071134, -0.7559496362930124, -0.10283935377449352, -1.415962048979373, 0.2209097705791921], [-0.367351400778697, 0.02550048126058188, -1.4765018695108785, 0.48537294600462355, 1.5677863134911176, 1.5802918429462876], [-1.0362881358320994, -0.01948299708870272, 0.011684092125221195, 0.5408356064499249, -0.32231919480702487, -0.2743975476148519], [0.47721916121162433, 1.8507438465745882, 1.0509034057546587, 0.31901575969435825, 0.9717909731892979, -0.43937072132360105], [1.2717738343205394, 2.0298644888790744, 0.8338868078963604, 0.5526993452713558, 0.45287589345879464, 0.13546596970702826], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.01467457651077667, 0.5650408481219626, 1.5859364201687527, -0.3306750176164302, 1.1328858486331193, -1.4370967341782646], [-0.09261531748940618, 1.9317814031123857, -0.7529266268433299, 0.0831102461296934, -1.6265206049525, 0.8673028139217391], [1.3850770021706877, -0.1589556450769209, -0.7222231750483867, -0.028830910162575617, -1.3544695934263853, -0.35425075311786564], [0.8172386313767898, -0.3296914879810947, -1.4981347482513863, -1.9733200637608967, -0.05694833418577802, 0.28203081839183586], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [0.32925111275873487, 1.431174535824048, -0.7333109711736768, -0.08952964820569219, -1.3103280417416365, 0.11486013851371163], [-1.0955065097976497, 0.23636638691345943, 0.009005757913120254, 0.04467112158774679, -0.5980962923558624, -0.46972210550726334], [-0.09094960392270277, 1.612079639446656, -0.6631807297328115, 0.3186298340648222, -1.1870600708095138, 0.48612554157771404], [-0.0847193832088922, 1.2829400359956586, -0.5704546668879165, 0.5696036937406216, -0.732141965265783, 0.09392931317634107], [-0.6017637237030475, 0.4764855422491642, -1.3111979339954187, -0.41976843980490686, 1.2498377157064484, 0.969458074031988], [-1.0876449033396716, 
-0.03882311751742299, 0.013343366793553593, 0.45819799769609026, -0.32404657241932183, -0.30011972795930447], [-1.129357277704386, 0.001730174954765843, 0.07230275416718643, 0.15148756661984314, -0.2948639694211371, -0.7512917461498356], [0.3833191142301635, -1.894616434627119, -0.029993868189208954, -0.16659117996378023, -0.377282084575878, -1.6683236621486437], [-0.6743087964669746, -1.535501596232982, -0.13924235785975178, 3.2550948673771054, 1.0957100662242318, 1.0624239572833818], [-0.7283826608768637, 0.4882111548245352, -1.2503774645602044, -0.8653883778084206, 1.2718480172071371, 0.4795130495841843], [-1.0532757590675166, 0.5474289068565733, -0.07510234526771815, -0.10191974184585212, -1.0015540546913706, -0.09657582070438447], [0.43253479661656147, -0.4620483204739776, -0.27353620697717373, -1.1832133276748855, -2.100470265047763, -0.5256176636063559], [1.2165550081059047, 1.5682691234630464, 0.959243143005555, 0.7842546455665532, 1.0556777536351267, -0.41787192706490967], [0.11583986200814116, -1.705924639129003, -0.17100944286931746, 1.119544372362371, -0.5039504274490183, 0.21225817999462823], [1.559666603261271, 0.06757346268190886, -0.7729367745834879, 0.1352311641492684, -1.5691458957219422, -0.07506549011508272], [0.5591473976562443, 2.4000383999040777, 0.9029186057750148, 0.07400304490447618, 0.2633852929059495, 0.21993602864758513], [-1.106064197480183, 0.15860075692768086, 0.03003278370833007, 0.08131883744614639, -0.49723185177198564, -0.5630086767079833], [-0.5365826061444998, -0.828821151503061, -0.03090148764735178, 0.8071495819919124, 0.24536867720197483, -0.822727947556421], [-1.0362881358320994, -0.01948299708870272, 0.011684092125221195, 0.5408356064499249, -0.32231919480702487, -0.2743975476148519], [-1.1331110656035799, -0.01839658476761943, 0.07766999971193135, 0.15904951249927257, -0.26932087227520174, -0.775488385629048], [-0.10275493693460909, 1.7473641571452263, -0.7019707016794317, 0.1980675460652914, -1.3791290701466665, 0.6468494467847006], [0.6749559192293838, 2.7028980140231935, 0.826500461180368, 0.07190825276398634, -0.08833741796034839, 0.5871078179557219], [-1.1298189947658828, -0.016371910540320778, 0.07734359174755166, 0.16377619812754576, -0.27028686045826195, -0.7729034619096025], [-0.42687063789857727, 0.738391797640892, 1.6566206399612997, 0.04278758049747396, 1.2547722095949914, -1.075944779750995], [2.2188081909700124, 1.003639761972219, 0.7523053249363139, -0.2000220404196442, -0.3168874967525198, -0.16415980657658316], [-1.0815061252445402, 0.14364114495864685, 0.03602565113639751, 0.13843100184579474, -0.4631993389838099, -0.579577924994325], [1.521176112621378, -0.30791536384527224, -0.827769374313355, 1.802486349804294, -0.9022928574546443, 0.7889505877840275], [-1.1229467845126613, 0.04130897171328903, 0.06167703853182597, 0.134788506875362, -0.34562824815100246, -0.7037598930209165], [-1.1378561907133042, -0.09258739932082369, 0.0981207114450329, 0.20404278267009188, -0.17016200457704048, -0.8642109508876219], [0.05694542184633091, -1.2700268343606953, 1.9860100826292424, -1.4782075638972119, -0.764974158993881, 1.674759395784809], [0.07574981441397331, -1.128161946298119, 1.9476178998068083, -1.5459204068079895, -0.9492280024405696, 1.8449149689408397], [-0.10282677621881313, -0.7974219755327584, -0.06711133534832325, -0.6166104878617027, -1.332370091981949, -0.7760056286060656], [1.4699236888741474, -0.13523023487012809, -0.6492744702512138, -0.08502183856726186, -1.2325702171875466, -0.8189410616387667], [0.8046766217431084, 
1.6473838539240178, 1.0585185782926332, 1.2820724841077504, 1.3231534811616785, -0.16756691808683744], [1.0375089625964216, -1.1279184170410461, -1.5671303983500053, -0.8424039035647299, 0.7295408606200874, 0.4780259772841493], [0.2772882161657704, 1.0484271472909412, -0.6298200174810682, 0.09084332045759708, -0.8138927754627975, -0.3442783204534804], [-1.051777134601864, 0.15635189123497667, 0.0346403525004358, 0.18516616346191375, -0.4642779801672904, -0.56288099875963], [-0.4505265255276632, 0.03334089044420513, -0.03845576315739376, 0.07580894983370655, -0.5673463083573385, -0.8557341238042938], [-0.4336371190654318, -0.18859475852177582, 1.9339263356873293, -1.237015760850109, -0.42241538878338913, 1.8865741421539322], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [0.29649783896751575, 1.1972132996358804, -0.6701227911747359, 0.018862555647729436, -1.007417755117439, -0.1658477531838907], [2.141170049878033, 0.9558911104169997, 0.7600031263632723, -0.3114932400983208, -0.2941062351858446, -0.2251210319986559], [-1.1241813110767978, 0.04054971887805206, 0.06179944151846839, 0.13301599976475958, -0.3452660025823549, -0.7047292394157085], [-0.10938190854296995, 1.60074348844801, -0.6613531715402482, 0.29216512123212074, -1.18165150297256, 0.47165255367254], [0.775007880646657, -0.6407540079242087, -1.4140266450705474, -1.826729200327298, 0.3465094281497299, -0.09111546641104311], [-0.8347001498924768, 0.009653356885966809, 2.000596757542892, -0.8054297004010332, -0.31240766376008355, 2.279512580443755], [0.8692616849484723, -0.7380841244022505, -1.4532202092388797, -1.379823120788471, 0.4241177993509101, 0.2950660333012614], [-1.1014199002928824, 0.19709331596325824, 0.01958218594585945, 0.062083910862096754, -0.5474778778416389, -0.5168636351545463], [1.5356812155928652, 0.15466536956895985, -0.7257251302227716, -0.17193034482427064, -1.594048155832892, -0.46981997989110147], [-1.1278141194992155, 0.0026792409988120607, 0.07214975043388337, 0.15370320050809613, -0.2953167763819465, -0.7500800631563456], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [2.1844163405787915, 0.6798466762625619, 0.7594881106477018, -0.16012703067328105, 0.001279520432642629, -0.48119350685832224], [-0.4566190245145744, 0.7200960820701857, 1.6595701847304234, 7.547665328707357e-05, 1.263501241062691, -1.0993031198262972], [-0.8700203698770563, 0.20508436753611614, 1.9432228404860892, -1.0139852572094235, -0.5999193461914215, 2.510747782160969], [0.8172386313767898, -0.3296914879810947, -1.4981347482513863, -1.9733200637608967, -0.05694833418577802, 0.28203081839183586], [0.6139370582505805, 1.7126244992098896, -0.6397262707204792, 1.572719824515266, -0.9371478414275283, 0.6425121646118953], [-1.082823786262873, 0.3332570690476671, -0.017227023086457124, -0.001877067864670706, -0.7240259074321056, -0.353517785837527], [-0.544021302352499, -0.1903294736728927, -1.3478715931516472, 0.44032570726527, 1.8737083212114454, 1.2428694458248486], [0.8801996553419459, -1.6525675116685195, -1.3505086676887978, -0.6265256191169861, 1.4697312159994538, -0.22667972718929624], [-0.4566190245145744, 0.7200960820701857, 1.6595701847304234, 7.547665328707357e-05, 1.263501241062691, -1.0993031198262972], [-0.3606088037860721, 0.07124331120151468, 1.6206646062806227, 0.9191646261879164, 1.883787673534396, -0.8024901759014307], [1.2521663150883413, -1.0146723047037585, 
1.3718922933261672, 0.0017748265957748396, 0.8525141497389762, -1.2452295681679721], [-0.12510918656933356, -0.9358528575831849, -0.02993663099789216, -0.5579422153671942, -1.154740397123501, -0.9422459462579338], [0.9699127647967354, -1.6327623012525752, -1.4305567331700118, -0.6027166612262965, 1.3848578531080815, -0.12752894685820218], [-0.14540375590207266, 1.5429532531834929, -0.6477914978672816, 0.26634882542169175, -1.1221984020331155, 0.40086995791814034], [2.2461448747310637, 0.7837512070505703, 0.8893345099391893, -0.1874168587812247, 0.10754180289200757, -0.9223507288670239], [-1.0839751783728133, 0.14212263928817298, 0.0362704571096826, 0.13488598762458986, -0.4624748478465148, -0.5815166177839088], [2.1921347152111115, -0.022799343181510428, 0.8025781728266176, 0.433129182448938, 0.7545381240818086, -0.750477160249121], [1.6602848861964412, -0.29258121286945116, -0.9001218003358, 0.5237438818431223, -1.341645055688965, 0.13978908673169976], [0.21463637617982315, -2.495608775826207, -0.17023081395308365, 2.146246463782174, 0.31105144973585563, 0.34126549706762266], [0.21991522002108985, -2.4567259608333187, -0.1807443268506883, 2.1279226058529748, 0.26061922944391686, 0.38790878266798284], [-0.11973052190979767, 0.2801540013281124, -1.3724736748351767, -0.7002954834325844, 1.1817512005836475, 0.5829958138731927], [0.8789532790492489, 2.2632450579195407, 0.8913121965707568, 0.9742675735780825, 0.519226482432005, 0.5707285437618866], [-0.09751803811095652, 1.9109480320846732, -0.7474454649190162, 0.08902257872485539, -1.6006403779306773, 0.8422040360644404], [0.8243915122977062, -1.068588051841809, -1.3638553496092394, -1.2240703283902723, 0.8527916718323872, -0.10140189430179734], [0.4526754966731685, 0.9495667175674827, 0.29455046230851095, -1.2037284937783266, 2.8598246492593997, -0.3259632397129816], [-0.9209407984177701, -0.1804192838630412, -0.08920148450269247, 0.9429821248635526, -0.25292127621398963, 0.10791031770590907], [-0.038161130428434005, -0.41270875104438454, -1.3267158047944738, 0.02489756672282227, 1.9622230495626283, 0.32920139386448016], [1.4531008012467383, 1.1313488569230847, 0.8635364661792215, 1.4844932011768994, 1.4632682074139745, -0.2875300872925886], [-0.6248553223920364, 0.528490898319511, -1.25408479167031, -0.6997435552962342, 1.2735564480441994, 0.5329070306295692], [-0.9018616870523392, -0.03165202202515321, 2.0072558064242063, -0.9018588139034339, -0.2927005388374747, 2.2267775516433512], [-0.006561778331065749, 0.42725330630726027, 1.6230074083885018, -0.27050484076353776, 1.3102086007463996, -1.6025156923182788], [0.3168598998789373, -1.2637331803744305, -0.05726967453197702, -0.818283137252845, -1.0644211820857787, -1.487661486215601], [-0.10053358325130633, 1.873457180623051, -0.7371563574969224, 0.11059603302349304, -1.5508722745145924, 0.797337885521199], [2.0438547651212247, 0.7891319783126902, 0.7996222621744893, -0.373507135610482, -0.11890136677929236, -0.42902775085600575], [1.2948576736912822, 2.323540910175722, 0.9782625545138273, 1.5148160337301888, 0.7731108952521528, 0.16678431017767084], [1.6283114401761014, -0.13433066253514095, -0.865759075048732, 0.47922386985017235, -1.4692471925018902, 0.2559687251808736], [0.4186106220140134, -1.1426351082518615, -0.1571472575975235, -0.5160318028718485, -1.2823121569168818, -0.8406033064658139], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-1.0803045249278158, 0.35262457593481533, -0.022471865644559833, 
-0.011211520854702527, -0.7492067590093929, -0.330290492753107], [-0.6736545073104236, 0.516296771014414, -1.2542414236756763, -0.7827599472026022, 1.263433903258544, 0.5158392403134164], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-0.13208298032601073, -0.9702050523855638, -0.02081738728599822, -0.5461028367508495, -1.1114555287694527, -0.9835739170445871], [-1.0052656274349965, -0.2681831084762863, 0.01029256819626518, 0.9787157663432111, -0.07186247800043795, -0.07200630955984189], [-1.1357323852611159, -0.05564498149052187, 0.0879200197802907, 0.1811889874018959, -0.21966844594403867, -0.8200449915568855], [0.32604319088120526, -1.937782308570306, -0.33381700895822014, 1.2917248627989322, -0.3914495106379681, 0.15929949688864473], [-0.4566190245145744, 0.7200960820701857, 1.6595701847304234, 7.547665328707357e-05, 1.263501241062691, -1.0993031198262972], [-0.6348544850775346, 0.02688509305466032, -0.050150044799980906, -0.2665545635241578, -0.659908955726734, -0.8729727989929261], [-0.12952282475134763, 0.999609157692382, -1.5014963170285212, -1.4403832898386086, 0.29722730471145115, 0.9431068837666461], [1.4738507829068714, 0.14458771427441133, -0.8741974177299406, 0.11637220651972266, -1.8297659688069534, 0.5097127353208389], [0.29690934782222794, 1.1974663839142925, -0.6701635921702834, 0.019453391351263574, -1.0075385036403215, -0.16552463771895995], [0.5429835200093737, -1.297959160338576, 1.8778194082441986, -1.873599364011705, -1.061855175926826, 1.4893305446175078], [-1.1430161043690492, -0.11357894802254392, 0.10362737399156363, 0.20958584295054528, -0.14420630972841575, -0.8895116759105022], [-1.1085555174782151, -0.03893078957561462, 0.08522544043234326, 0.22020895893469794, -0.22764291989224533, -0.7987058000219348], [0.7852956020144612, -0.6344269009639003, -1.4150466699592346, -1.8119583077389445, 0.34349071507766693, -0.08303757978777657], [1.4298997714869566, 0.05903735372908202, -0.7800506608970035, -0.10289152661427492, -1.6288350900028823, -0.091961292455718], [0.5767549692573237, -0.5330529527000917, 1.57454523439194, -0.22644193757977912, 0.7447598614860401, -1.1677058144759533], [-0.6345631368083985, 0.027064276723775794, -0.050178931904828694, -0.26613625184605594, -0.6599944456809347, -0.8727440332437555], [0.9573667137049985, -1.1952917681137996, -1.4730461058331525, -0.8136165571614185, 0.8849307417268601, 0.3226426758120687], [2.4141153749615665, 0.5410906038910349, 0.7418305964454079, 0.5719093290033606, 0.21023576440811034, -0.13740507512914854], [-0.5535909305349368, 1.0534089007235157, -1.3960172157879747, -0.947115637340432, 0.5927214741030293, 1.1625913862344273], [0.5439799135060545, 1.9987114710067824, 1.014313743862842, 0.3371601042191091, 0.8055516826067719, -0.25945532573735225], [-0.4505151136726288, 0.7238500811718759, 1.6589649835634672, 0.008839342643808942, 1.2617101782227746, -1.0945103481349805], [1.648370807557644, -0.6206347577227879, -0.8090294703722151, 0.7397658915616245, -0.8981998202908558, -0.2520509959550368], [-0.617332857997708, 0.8894797776230468, -1.3547318111361342, -0.9479740619769887, 0.7825165826418906, 0.9637971854907952], [1.3607291311541103, 1.9651784658502993, 0.7040853214513487, 0.8351924937864156, 0.3757235287420512, 0.6311921333469054], [-1.1271795728452494, 0.0030694969561238803, 0.07208683529874917, 0.15461426916294582, -0.29550297060423136, -0.7495818191094223], [-1.1105174995017477, 0.12579870333057744, 0.038902121322495466, 
0.0967770847466375, -0.45468656812059277, -0.6023575652719363], [-1.0966735447831037, 0.25346676483458996, 0.0041264106731321885, 0.03004396236545598, -0.6221954786651683, -0.44938928075769197], [2.213529347128745, 0.9647569469793297, 0.7628188378339185, -0.18169818249044425, -0.2664552764605814, -0.21080309217694262], [-1.0644860956671134, 0.48708001013994084, -0.059005667956931074, -0.0791606405803255, -0.9249397360704026, -0.16912567282436938], [0.27760354850047875, -1.6348072852641007, 1.795532256785787, -0.6616228435279445, -0.5728425232850344, 2.387685481853595], [-0.5732700152055393, 0.5474639786591144, -1.3290082531192005, -0.4177124415219995, 1.1681519591244145, 1.0555787756790607], [-0.8981053366725542, -0.11292075628126028, -0.10645078993769641, 0.9369141322228659, -0.33294674060638824, 0.18958818171018504], [0.8525863832527951, -0.6943777288643159, -1.3933105056593575, -1.6416852038732246, 0.46275073473643435, -0.1510499465663175], [-1.0890416437924784, 0.5076142149479357, -0.0665611086773425, -0.1403200870157509, -0.966617688453359, -0.14590825843148253], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [1.1393708603169008, 1.948434622299906, 0.8470145282137651, 0.36259795765924424, 0.49172673069624256, 0.03150356886558877], [0.9675731141231646, -1.3977666985874568, -1.4889212164787926, -0.4485292958826092, 1.0605259200992132, 0.5790828861116615], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-0.12422423326125977, 1.2586439452680744, -0.5665377713153573, 0.5128834662013436, -0.7205501070690618, 0.06291022854299769], [-1.0691122905913162, 0.4307804618779058, -0.04356180657490359, -0.04694816805825272, -0.850257393815555, -0.23650567750546392], [0.952214767457153, 1.7748109328670847, 0.9553600314972281, -0.06227660030463564, 0.7346780954634278, -0.6826146407068375], [-1.0341734138728063, 0.11878952816243102, -0.026923726611412773, 0.44431034484384163, -0.5108280393240812, -0.10938984298941158], [-1.1066987441341487, 0.15821050097036907, 0.03009569884346396, 0.08040776879129669, -0.4970456575497005, -0.5635069207549063], [0.29007785218656584, 1.032901807395998, -0.6245307250729523, 0.1262088703405046, -0.7855592953301416, -0.3621312994253059], [1.755052336609587, -0.8814116937275482, 0.08231472576409381, -0.36777683424764746, 2.852059736196209, 0.06698860522412996], [1.808826431805979, 0.5008351049764008, -0.8027579916659587, 0.09073022426139203, -1.9186130903260106, -0.04285544123025557], [0.5202807169433498, -2.146470164219797, -0.48892034966215486, 2.075813135208738, -0.3209046772061796, 0.9861191140766243], [-1.1146325880488694, 0.12326786054645415, 0.03931013127797045, 0.09086872771129585, -0.45347908289176747, -0.6055887199212426], [0.6000665442297305, -1.4537861233864628, 1.8523014341284012, -1.4541591935675748, -0.9244565739103122, 1.803827310197326], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-1.0928670878770161, 0.25580779440990387, 0.003749001464317814, 0.03550919262314688, -0.6233124025018316, -0.4464004627070836], [1.199433706628629, -1.3796292105530799, -1.5580878755351715, -0.15587021508478016, 1.0561505405495395, 0.6836015404531799], [-0.9435062938482569, -0.17647928841935184, -0.09195917997421167, 0.8976315090574866, -0.2707415393334048, 0.11144113827908567], [-0.07986981520248568, -1.1702363389868893, 
-0.03430007620739356, -0.10370069063820855, -0.9161000153891028, -0.7220454877067431], [0.9641657382759578, -1.7562247351068698, -1.3886822069684432, -0.19439043216651306, 1.5503583002726964, 0.15142388589974193], [-0.6912786167930551, 0.46982142766915497, -1.2425038809116469, -0.7821611603794267, 1.317488579236958, 0.4595024907651369], [-0.40516138552179165, 0.5374182576684038, 1.6411673475734918, 0.4284412992471718, 1.4346368196108525, -0.8171187502739511], [0.4456152548121596, 1.2048976381474374, -0.7347363019258895, 0.4927344270205671, -1.0436744557133266, 0.34840919049182295], [0.14907575613210897, -0.018991606958080706, 1.5316719482282475, 0.6135810418238762, 1.6853793158155002, -1.0609947913536373], [-1.130624729103409, -0.01686744955745209, 0.07742348009683361, 0.16261934182002585, -0.27005043485045793, -0.7735361219899366], [-1.096692531961554, 0.004001373871871975, 0.07405911160597876, 0.2113384722620445, -0.2800071165503042, -0.7468926669826844], [-0.07942565218421017, 1.8686207606652259, -0.7342541448991744, 0.15385390876764138, -1.5326243201271266, 0.7926625899712378], [-0.392279199440634, 0.92029520394057, 0.5620454472613929, -2.0525818311833106, 0.6952747335831784, 4.018299609533788], [1.4479709466343653, -0.05964089719705152, -0.6720731538585457, -0.18129889667903779, -1.3483367913383377, -0.7299324013058495], [-1.1172564318166824, 0.08044487098459054, 0.05112272463867389, 0.11705548464969633, -0.3961812169658235, -0.6567934919556259], [-0.9022563240440081, -0.03189472984815047, 2.0072949345789355, -0.9024254253431228, -0.29258474100403026, 2.2264676839124817], [0.4279096697065714, 2.062030983315501, -0.902934811881742, -0.36232754330096206, -2.1214093904520994, 0.8723001916595772], [-1.0547341423221068, 0.564350101108588, -0.07995280540285868, -0.1169652127462451, -1.0255677510464756, -0.0764717617039838], [0.9605511014563831, 1.7264835393750377, 0.9695186615195096, -0.011452803121237273, 0.8055568592474763, -0.7398165082426167], [-0.29870398765921674, 1.1513362112212462, -0.5492381492032219, 0.26236912790286593, -0.6693527333668767, -0.07409072858760184], [0.1755568643531234, -2.3287093755977812, -0.14649788339248906, 1.752655355367663, 0.16837009984323262, 0.0409050330785208], [0.7694590704906612, -0.7510753459425681, -1.3835061312664203, -1.756986733951345, 0.49478737595348254, -0.22296743658885618], [-1.1012997397073063, 0.1971672165725547, 0.01957027205515969, 0.06225643488752884, -0.5475131364103208, -0.5167692854387865], [-0.48739945889755254, 0.17797271198932582, -1.3735878946244544, 0.08052813839952305, 1.5074124203786134, 1.1876468022558677], [0.5349866552071134, -1.9108556396806278, -0.4805103493493251, 1.7335429274191139, -0.5282514795255181, 0.7704891711165828], [-1.0521999487172546, -0.02926900688207238, 0.01326174422005602, 0.5179897623013701, -0.3176502116727267, -0.28689145329732574], [-1.1332641510240422, -0.03630885705389676, 0.08268023654563605, 0.17178127078462574, -0.24483432470448874, -0.7968577647901166], [2.0775356808953727, 0.5731452237113246, 0.9360224182290093, -0.3517930399306764, 0.30366635288795585, -1.182237202023611], [0.15708091090869017, -2.464799228072893, -0.10970058825087234, 1.8167888581198626, 0.3448828708650174, -0.12234646652273881], [-0.44442213986669477, 0.07830446683288031, -0.050613451338438406, 0.05461948127579804, -0.6256654652898225, -0.8017964411675276], [1.1163636117006264, -1.6557834898709756, 1.5493542964889107, 0.5414959846656534, 1.6565308454607701, -1.590334771542308], [1.2182101916292758, 1.9791039019291035, 
0.8441926875441647, 0.4887453559236052, 0.4930346333114743, 0.07215878812167942], [-1.1357323852611159, -0.05564498149052187, 0.0879200197802907, 0.1811889874018959, -0.21966844594403867, -0.8200449915568855], [1.0444726702871912, 2.2057315332103262, 0.8413164569708775, -0.20179731309492824, 0.19433278960008876, -0.16394130779176494], [-1.1125750437753084, 0.12453328193851586, 0.03910612630023315, 0.09382290622896672, -0.45408282550618023, -0.6039731425965896], [0.31857342687646817, -1.2448612125044136, -0.06243462874079752, -0.8287744465503966, -1.0893656080552623, -1.4650668532115156], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.8848940524505503, -1.1736565254529328, -1.4110368440423446, -1.1644919570768435, 0.9231392968105127, -0.1526822233820292], [-1.128241681325771, -0.015401838501166302, 0.07718720153161811, 0.16604087137919213, -0.27074968954647066, -0.7716649603325233], [0.5433094350223058, -1.2977587175900738, 1.8777870938557248, -1.8731314221345063, -1.0619508087569487, 1.4895864520657331], [0.04491206753730889, -1.3308818996341523, 2.002188361931125, -1.4566301340387535, -0.6881183433271483, 1.6015633127349629], [-0.3869247062053312, 0.23949971882635, -1.3024067891306939, 0.35568999761675274, 1.5853624551927306, 1.1953325821664775], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.24374040288866022, -1.4654738473348432, 1.8266496098674196, -0.7177566951885931, -0.7166870968974408, 2.516984326882167], [-0.688909161187151, 0.4178243121401292, -1.2277536364539265, -0.7399044808972733, 1.3901181966028027, 0.3976154489878636], [1.104422554371947, 2.1046025208128816, 0.7167828183278016, 1.4513778178833137, 0.539001058278068, 1.0323599678945037], [0.8256381159570565, 1.3902286237468604, -0.8322341442243764, 1.272232339190645, -1.196568704960929, 1.0864707211501923], [-0.4257887866585741, 0.9473015810920423, 1.5981349962700466, -0.10702687539275169, 0.9687999273895668, -0.8267513741100718], [-0.14408568478741082, 1.5794001319966635, -0.6579123011827419, 0.2423381738459756, -1.1714684177923091, 0.44440325716854256], [-1.0925082480291977, 0.2738466088353876, -0.0012816358671602218, 0.02307285218956054, -0.647859324333986, -0.42486952581354914], [-0.6410451118758346, -0.38167360785840226, -0.009453751480066148, 0.2174572991493776, -0.2106445113893955, -0.8631484297982218], [-0.36580933084026457, 0.39503097879790683, -1.3444608407211127, 0.2823945658999535, 1.3836335740249759, 1.381905724567917], [-0.6836303173458844, 0.4567071271330184, -1.238267149351531, -0.758228338826473, 1.3396859763108637, 0.4442587345882232], [-1.1116491488522062, 0.12510272156494362, 0.03901432406025105, 0.0951522865619185, -0.45435450968266566, -0.6032461328004957], [-1.0717517125119496, 0.41133905438146123, -0.038305050126101366, -0.03778623909365253, -0.8250412836695857, -0.25982732030564415], [1.385642826845917, -0.1586076541941039, -0.7222792764172644, -0.028018511070216193, -1.3546356226453482, -0.35380646935358606], [1.0339938394385935, -0.8808930706100206, -1.5555695421644629, -0.8978702783123444, 0.4958217224778955, 0.701547709658527], [-0.05243557128653002, 1.159742502464364, -0.6069366579117508, 0.9186339208957647, -0.6531467465232368, 0.44605516158816644], [-0.575144052285189, 1.325243357512718, -1.4738011646589526, -1.185286034821823, 0.20797973281431084, 1.485654773973054], [-0.20105718292317645, 0.4032526322629076, 
-1.3395568920043341, -1.141592599347657, 1.0759080061644348, 0.22821384358433447], [-0.7283826608768637, 0.4882111548245352, -1.2503774645602044, -0.8653883778084206, 1.2718480172071371, 0.4795130495841843], [-1.1388689098782412, -0.07539211479528847, 0.09322606383171443, 0.18963718683662653, -0.19430647158242745, -0.8437569578387016], [-1.1205810180436662, 0.051673015697235467, 0.05894494417674307, 0.13170944675144589, -0.3585432459691543, -0.6912777121089726], [0.31480686569674027, -1.3362683175782615, -0.0370858829117471, -0.769424619909505, -0.9660522512243144, -1.5742702301025993], [0.6958149788696649, -2.282635648119244, -0.5923407087208593, 2.5732754149026373, -0.2523704028048603, 1.4010825713883195], [3.0545569274227, 2.770207590973579, 0.6758823475765972, 3.240510341548397, 0.31115644853256474, 1.1815248174253403], [0.0778415139224757, -1.1268755189109494, 1.9474105083464397, -1.5429171889269253, -0.9498417671823818, 1.8465573648490816], [1.137591243315812, 1.7462753391286088, 0.9769404699298819, 0.3074954607599696, 0.8758163338369207, -0.7070510277297916], [-1.1276231876436482, -0.03283957776542052, 0.08212093649867098, 0.17988044660867183, -0.24648954545616228, -0.792428497996847], [-0.6611116868638082, -1.438294558750759, -0.16552614010376382, 3.209285222554106, 0.9696295154943854, 1.1790321712842813], [-0.3583888442233852, 0.7740417748837921, -1.5312359585720288, -0.10983624937327098, 0.7607497894779843, 1.9051716652397452], [0.3177994802968418, 0.4430192243267064, 0.19601316898616894, 0.5760099870896248, 3.5456616756777755, 0.8741446332022891], [-0.8166611666850929, -1.0276317353192537, -0.06420929362983481, 2.3512288771413843, 0.6433162184817895, 0.595082056612905], [-1.1263705588164137, -0.049887314156641306, 0.08699179713158532, 0.19463049965729773, -0.22241547483961577, -0.8126941147297129], [-1.1387833160364607, -0.07533947326537874, 0.09321757722464061, 0.1897600806629616, -0.19433158727518693, -0.843689749821996], [2.081679949149817, -0.09220308869072945, -0.7709708343282078, 1.1035165286718642, -1.1698278726094442, -0.16770334119057467], [0.4348764743479867, 1.0022937765362019, -0.6787259116491137, 0.6197830193388874, -0.7716654819379762, 0.1062361870297308], [1.050520855725491, -1.2565010974655801, 1.579365062907938, -0.2700953497674148, 1.288118908119258, -2.112325179536497], [-0.44442213986669477, 0.07830446683288031, -0.050613451338438406, 0.05461948127579804, -0.6256654652898225, -0.8017964411675276], [-0.4653801154680738, 0.6556804686453731, 1.6769863430020833, 0.030402059076247108, 1.3470415795791062, -1.176576016112771], [-0.10581242709257291, 1.834574365630162, -0.7266428445993177, 0.12891989095269282, -1.5004400542226535, 0.7506945999208393], [-0.6103083812152237, 0.9828905509290852, -1.3804035784469337, -1.0026462423530003, 0.6582472597552821, 1.075558667517735], [-1.0691122905913162, 0.4307804618779058, -0.04356180657490359, -0.04694816805825272, -0.850257393815555, -0.23650567750546392], [0.542914798030637, -1.298001425413071, 1.877826222010455, -1.873698033574195, -1.0618350109235046, 1.4892765843348645], [0.995338472093254, 2.0016113744204005, 0.8187317445258653, 1.4515626475884336, 0.7273249328678912, 0.8330024524502703], [-1.0760371698514317, 0.39088530977136693, -0.03288508969510901, -0.030987652943189274, -0.7993421794320864, -0.2844414249655467], [2.7380001227200568, 0.6814983836257269, 0.8806496847559423, 1.0116787291940585, 0.410665275972825, -0.5214622973904622], [0.29385841704688237, 1.1777718921394351, -0.6648660347259338, 
0.028024484612329165, -0.9822016449714699, -0.1891693959840705], [0.6250055640830882, -0.8115927032780484, 1.5750963662558874, -0.06383974811406255, 1.046321909421397, -1.426495109580354], [-0.9109655648818141, -0.09627845465388356, 2.024705951925157, -0.8720243976215176, -0.20905961680149865, 2.1492355001745906], [-1.0922365315200786, -0.0951014067000716, 0.02878380089195488, 0.4904601004172601, -0.2493743730403964, -0.3674725909413447], [-0.5931822593444368, 0.7617877882768461, -1.317165869055938, -0.809686702871433, 0.970963126292568, 0.8127667442317282], [0.8163867874149884, -0.41930599711094796, -1.4730749958737983, -1.9097853478318727, 0.06550976085759241, 0.17511606833885596], [0.31233437049131574, 1.313861744614548, -0.7016633298675504, -0.03610901813987029, -1.1587144159932545, -0.025917896382811043], [1.7534467303882093, 0.14604953321576136, -0.7073560565476701, 0.2443452127941, -1.4624137921984364, -0.4688245949002932], [0.16897600090635947, 0.14136536947261572, 1.488176053491309, 0.5344900452245421, 1.476362289932175, -0.868729581506382], [1.9960742110342629, 0.5230451661253762, 0.9440993017055995, -0.46875369413088874, 0.32756948898073185, -1.2462004932303543], [-0.1894483867331376, 1.21853008713972, -0.5600708135210802, 0.41923600719118165, -0.7014114661921838, 0.01169642735148829], [0.4591712042631443, 1.320143490055144, -0.7660507215013797, 0.4344884417639814, -1.1943019282753282, 0.48654834138625674], [-1.1028054629862265, 0.14278680639372016, 0.03470473948795023, 0.09894921454950129, -0.4737464302044912, -0.581699105549312], [-0.6123659254887844, 0.9816251295370235, -1.3801995734691967, -1.005600420870671, 0.6588510023696946, 1.0739430901930818], [-1.1367438740259983, -0.05626706264685943, 0.08802030862734643, 0.17973671324260895, -0.21937164607479342, -0.8208392093696849], [2.7655974731525585, 1.2457675281794616, 0.797870452740761, 0.4547897464818261, -0.24041353948646807, -0.3444847584345559], [0.7565319272022234, -0.7768438603993211, -1.377229349928931, -1.7625956975750985, 0.5230221991715144, -0.2543669660123024], [-0.8702956658250787, -0.012238433196699969, 2.0041260436577493, -0.8565369887567368, -0.30196291653074636, 2.251563092727252], [-1.0992775810687414, 0.21622899765138084, 0.01437471709967839, 0.05220825236762772, -0.5725481237719663, -0.49393231583600267], [0.8943718748741667, -1.3816449875690353, -1.3520358662114653, -0.9954652391482295, 1.2136577662738923, -0.40023039049112263], [-1.1192833996434755, -0.2720987483953718, 0.07642101689555622, 0.5681907754703862, -0.02146339354361296, -0.5799522998624095], [-0.6598755200601845, 0.6316797946010201, -1.2855779573907529, -0.8406856995078723, 1.1127409849971412, 0.6541535197898427], [-0.6176447693300511, 0.942742314544134, -1.3696860605715915, -0.9872765629414711, 0.7092832226616331, 1.0272998045927217], [0.4082699413316785, 0.9146578596923424, -0.6561076470275514, 0.6333881427594553, -0.6660918489616809, 0.0003481126967154875], [-1.129937924951404, -0.034263176831489786, 0.0823504420986255, 0.17655699577629233, -0.2458103350149481, -0.7942460224870821], [0.8832318866493926, -1.2182687725512196, -1.4643514146904244, -0.6070925253371113, 0.8749651114506677, 0.7178910373490335], [0.5429320814025348, -1.2979907958733778, 1.8778245083686418, -1.873673218474647, -1.061840082361466, 1.4892901551843916], [-0.6049430782531391, 0.31416708870177906, -1.265927175733572, -0.3077692939468003, 1.4707452808760462, 0.7757190620769016], [-0.11615008599583872, -1.2103673862435416, -0.02570783837258384, -0.1428395804375248, 
-0.8810127134974878, -0.7717818197650044], [2.0179535403436764, 0.43345214864664716, -0.768547246902242, 0.533457606292378, -1.7111190863386025, -0.11239043670361572], [-0.4547513110308468, 0.41276363338657385, 1.7458633181751375, 0.2269844428017816, 1.6861055736643844, -1.4657187896826818], [-0.7283826608768637, 0.4882111548245352, -1.2503774645602044, -0.8653883778084206, 1.2718480172071371, 0.4795130495841843], [-0.688909161187151, 0.4178243121401292, -1.2277536364539265, -0.7399044808972733, 1.3901181966028027, 0.3976154489878636], [-0.688909161187151, 0.4178243121401292, -1.2277536364539265, -0.7399044808972733, 1.3901181966028027, 0.3976154489878636], [-1.0768968675568038, 0.14981193009520666, 0.03463343823996081, 0.14262400502754494, -0.4691279426450737, -0.5719803459813348], [0.3554356573351139, -1.4567008193467088, -0.38545963314494897, 1.1280878475867213, -0.9281242161004389, 0.6636502892599734], [2.9512391898070605, 3.004241715864285, 0.757157281734599, 2.807880353637908, 0.1481068869929452, 0.8870130799096002], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [-1.1111520461557138, 0.12540844737326565, 0.03896503645762971, 0.09586601609178773, -0.4545003738983079, -0.6028558093188593], [0.2914852716533404, 1.0571585548961668, -0.6312276518274565, 0.11122715222952492, -0.8180585995022441, -0.3331308369133727], [0.07125166580685258, 1.3249026687002987, -0.6441755074602635, 1.0314636624716826, -0.8116482756470372, 0.649419877922948], [3.115876943719322, 0.1456304177426125, 0.6762680160819693, 2.447333422152318, 0.8162240412214479, 0.4929438770172246], [-1.0635831782194898, -0.11311539826363666, 0.035932945298704805, 0.5575030887884787, -0.20889883444830384, -0.3874724215344522], [-0.6372025587290314, 0.007622869227331057, -0.04492217545602619, -0.25697432288145594, -0.6347783355349657, -0.8960656760439354], [0.22936688170848707, 1.712567875336896, -0.8957205262782671, 1.115369391822943, -1.471251589878126, 1.688181263847569], [-0.9669426204189949, -0.511619193068234, 0.00027557786470355376, 1.0971101190770325, 0.1760846547054148, -0.2894461774257092], [0.7747379143319274, -0.7121925309496788, -1.3940196441640254, -1.7753105918805447, 0.4443551556615441, -0.17632415098849608], [-0.0795820369322959, 0.9292295395695643, -0.5443043914771443, 1.0350762612098394, -0.3518816585233131, 0.16974971810024453], [-0.8950948781230732, 0.02039099631989237, 1.9931620267497125, -0.9269469320457241, -0.3603662998047135, 2.2891921755592595], [0.8731332335418476, -1.6212772250280003, -1.3597981507199777, -0.6625745481522103, 1.42292145139399, -0.1897299055368562], [-0.5837861381604553, -0.7203725159031752, 0.008622112757423611, 0.44074884261943365, 0.17839603200870044, -1.1932148976785282], [-1.0921790533249562, 0.2205947014539935, 0.013670899926484164, 0.062400168253591765, -0.5746310357916896, -0.48835857406594896], [-1.125174697578582, 0.02212064849525668, 0.06689299398508099, 0.14454127154349622, -0.32053288652791584, -0.7267584203561657], [1.2627798327238289, -0.9130354306384091, -1.6842500303064287, -0.3757565102351778, 0.45096389704627726, 1.243320967657498], [-0.1490688132801324, -0.6115255199400678, -1.2791055585154356, -0.03940643800300775, 2.1739236832739417, 0.08636056119208928], [-1.1123890968301278, 0.11907460169906749, 0.04065001273661656, 0.09814079101696235, -0.4464926887348794, -0.6104733218201966], [-0.7082789036109999, 0.3168210104817211, -1.2008578392767026, -0.7029573716272861, 1.5180099751758873, 
0.27616050301300443], [-1.0955065097976497, 0.23636638691345943, 0.009005757913120254, 0.04467112158774679, -0.5980962923558624, -0.46972210550726334], [1.5147938615249146, 0.1952736925694299, -0.7386393298808548, -0.2407746309654606, -1.661244089669024, -0.4224731340357079], [-1.0839751783728133, 0.14212263928817298, 0.0362704571096826, 0.13488598762458986, -0.4624748478465148, -0.5815166177839088], [-1.0808370132593037, 0.37011520981325763, -0.027414128019682345, -0.02492761142214368, -0.7734921395409837, -0.30945942395661236], [-0.011823338800434553, 0.3883811208540641, 1.6335192076442937, -0.25215616773478944, 1.3606357496003767, -1.649145407069112], [1.8871153129855653, -0.3440109110722508, -0.9424701851149366, 1.1869037799950877, -1.2540552193614873, 0.5875709815954152], [-1.1441477537195077, -0.11427492978817781, 0.10373957672931909, 0.2079610447658265, -0.1438742512904889, -0.8904002434390617], [1.5523446922335293, 0.34816033307144484, -0.8521317442834845, -0.08250636219680159, -1.9580634831775068, 0.25917219974724076], [0.5215296584734507, 1.8479322799693465, 1.0549378253964465, 0.40448803786364557, 1.0000275568791128, -0.440430427293271], [0.08201993009234734, -1.0853334631444715, 1.9360708963656201, -1.5652458890609113, -1.0045272053805896, 1.8963150023865185], [-0.8095090619123128, -0.1657876377388758, 1.978240860582025, -0.43177929698079914, -0.16565105122017548, 2.568967897491447], [-1.0994993884679407, 0.19827446029060858, 0.019391767699639344, 0.06484134109049058, -0.5480414111979318, -0.5153556552797149], [-1.1159338506106564, 0.04562203398599201, 0.06098170796570572, 0.1448575289349909, -0.34768604447796625, -0.6982533592675683], [-1.0593603372463094, 0.5080505528465529, -0.06450894402083134, -0.08475274022417213, -0.950885408791628, -0.14385176638507852], [0.36897798192231457, -0.023498695657202844, -1.4651828394330149, -1.7090608797795113, -0.12097979593694867, 0.8012570657980642], [-0.45663630788647197, 0.7200854525304925, 1.6595718983722365, 5.0661553738564945e-05, 1.2635063125006525, -1.0993166906758245], [1.2064609609635268, -1.156157914980073, -1.774672328578497, -0.23708716263203108, 0.5386472874667052, 1.518732423903261], [-1.1389717870919192, -0.07545538586489157, 0.09323626408060141, 0.18948947791074297, -0.19427628445170672, -0.8438377367049341], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [0.29133064339964254, -1.5823421467286096, 0.03017753830693207, -0.6347610667885416, -0.6414224901797795, -1.868942986966919], [-0.9022390406721104, -0.031884100308457314, 2.007293220937124, -0.9024006102435748, -0.29258981244199134, 2.2264812547620108], [2.155583762927549, 0.4521563212097198, -0.9181600148946749, 0.8469825176630665, -1.8758789566965834, 0.485302657214463], [0.9481352269308695, -0.84371248733719, 1.6105969941774165, -0.8309025864800773, 0.8803795599733012, -2.1936132584430226], [1.376359086634857, -0.36588968413016404, -0.6648508299743733, 0.10517010334579925, -1.0754089005432668, -0.601483163945439], [-0.17343745114463635, 1.2406221408456044, -0.5650910290328315, 0.43332352062721835, -0.722906463737054, 0.03887115665536912], [-0.820597416505898, -0.5176183771930696, -0.14177348437108375, 1.4450000202613054, 0.05000611526658629, 0.2792377228061979], [-1.123376395630471, 0.058862876661334405, 0.05672457590781676, 0.12122012523380443, -0.3699438158133138, -0.6828480453581892], [-1.1225533779210468, 0.05936904521815906, 0.05664297391672188, 0.1224017966408727, 
-0.37018531285907885, -0.6822018144283277], [0.3051532794740477, -1.3422054216655361, -0.036128732357198406, -0.7832850346787128, -0.9632196116260138, -1.5818501957944076], [-0.6663988969872523, -1.32790412247422, 1.9399044467212259, 1.3464620963265215, 0.9690863586316638, 3.2508716846903796], [-0.43778162685676625, -1.2259962538459936, -0.06677306030797717, 1.3498584997240206, 0.6297182366105605, -0.7230053472567923], [0.7311712176911652, 2.292018227760847, 0.9458031903646367, 0.4764098657290774, 0.506208095830737, 0.10001847018610248], [-0.9021876020652714, -0.03185246477365561, 2.0072881208126794, -0.9023267557806328, -0.2926049060073515, 2.2265216441951257], [-0.653888963519003, 0.31463541585862015, -1.1962604707334836, -0.5989623366856349, 1.5509336596498613, 0.27636895982758986], [0.5238803005532945, -2.725037242935024, -0.3767935306530367, 3.6900481820346975, 0.3648683733482454, 2.0731648197004278], [0.4848880870321703, 1.8253971496509556, 1.058570827641986, 0.3518788451495582, 1.0107792468536174, -0.4692012745216268], [-1.1198958537373158, 0.06100346348814587, 0.05637948108747618, 0.12621741361429628, -0.37096510681985423, -0.6801151347558058], [-0.6466784104570179, 0.7288868320832432, -1.311861739634765, -0.8864953443308718, 0.9866604342672948, 0.7707617337907424], [1.3077704068850529, 0.3530410404039671, -0.7980339436506143, -0.7452394067153197, -1.9915635057064112, -0.24504047126504896], [0.34435243209198807, -1.0864617484056098, -0.10495107801375632, -0.8953739372390437, -1.2924629322328438, -1.2748318432460168]] print(f"data has {len(data)} entries") center_points = [[0.034263148941797085, -0.1159878632167158, -1.354003405203193, -0.8650113508074581, 1.0156116253542034, 0.4817964241472219], [0.9218557884644116, 0.7116445830799667, 1.1123934237356012, 0.14194169006315296, 1.011490905244407, -0.6711496861475166], [-0.6937332641459568, -0.443989593037132, -0.029277708610171257, 0.3924171627311612, -0.4039357793744134, -0.473170515212907], [0.7590346369367189, 0.830787011873216, -0.6790700154511047, 0.48960099396540635, -1.136947295960624, 0.22894960696987418], [-0.17444734370440676, -0.6740414604313897, 1.8622281726084713, -1.0928181437635947, -0.49237147713980045, 2.1529579594640778]] print(f"center_points has {len(center_points)} entries") def euclidean_distance(v1, v2): dist = [(a - b)**2 for a, b in zip(v1, v2)] dist = math.sqrt(sum(dist)) return dist def find_all_dist_with_target(data_points, target_point): """ Finds all distances between the target and the other points. """ distances = [] for dp in data_points: distances.append(euclidean_distance(dp,target_point)) return stats.zscore(np.array(distances)) def multiprocess(func, jobs, cores): results = [] if cores == 1: for j in jobs: results.append(func(j)) elif cores == -1: with mp.Pool(mp.cpu_count()) as p: results = list(p.map(func, jobs)) elif cores > 1: with mp.Pool(cores) as p: results = list(p.map(func, jobs)) else: print('Error: cores must be a integer') return results mp.cpu_count() func = partial(find_all_dist_with_target, data) results = multiprocess(func,center_points,3) results for i in range(1,1000): print("Start") test_past = np.allclose(results, multiprocess(func,center_points,12)) if test_past: print("Test past") else: print("Test failed") print(i) print("----")
0.172555
0.607023
```
#@title Default title text
import pandas_datareader as pdr

key = ""  # Tiingo API key (left blank here; supply your own)
df = pdr.get_data_tiingo('AAPL', api_key=key)
df.to_csv('AAPL.csv')

import pandas as pd
df = pd.read_csv('AAPL.csv')
df.head()
df.tail()

df1 = df.reset_index()['close']
df1.tail(500)

import matplotlib.pyplot as plt
plt.plot(df1)

### LSTMs are sensitive to the scale of the data, so we apply MinMax scaling
import numpy as np
df1
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))
print(df1)

## Split the dataset into train and test sets
training_size = int(len(df1) * 0.65)
test_size = len(df1) - training_size
train_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]
training_size, test_size
train_data

import numpy
# convert an array of values into a dataset matrix
def create_dataset(dataset, time_step=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - time_step - 1):
        a = dataset[i:(i + time_step), 0]  # e.g. i=0 -> rows 0..99 as features, row 100 as target
        dataX.append(a)
        dataY.append(dataset[i + time_step, 0])
    return numpy.array(dataX), numpy.array(dataY)

# reshape into X=t,t+1,t+2,t+3 and Y=t+4
time_step = 100
X_train, y_train = create_dataset(train_data, time_step)
X_test, ytest = create_dataset(test_data, time_step)
print(X_train.shape), print(y_train.shape)
print(X_test.shape), print(ytest.shape)

# reshape input to be [samples, time steps, features], which is required for LSTM
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

### Create the stacked LSTM model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM

model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(100, 1)))
model.add(LSTM(50, return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(X_train, y_train, validation_data=(X_test, ytest), epochs=100, batch_size=64, verbose=1)

import tensorflow as tf
tf.__version__

### Let's do the prediction and check performance metrics
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)

## Transform back to the original scale
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)

### Calculate RMSE performance metrics
import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train, train_predict))

### Test data RMSE
math.sqrt(mean_squared_error(ytest, test_predict))

### Plotting
# shift train predictions for plotting
look_back = 100
trainPredictPlot = numpy.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict) + look_back, :] = train_predict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(df1)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(train_predict) + (look_back * 2) + 1:len(df1) - 1, :] = test_predict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()

len(test_data)
x_input = test_data[341:].reshape(1, -1)
x_input.shape

temp_input = list(x_input)
temp_input = temp_input[0].tolist()
temp_input

# demonstrate prediction for the next 30 days
from numpy import array
lst_output = []
n_steps = 100
i = 0
while i < 30:
    if len(temp_input) > 100:
        x_input = np.array(temp_input[1:])
        print("{} day input {}".format(i, x_input))
        x_input = x_input.reshape(1, -1)
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print("{} day output {}".format(i, yhat))
        temp_input.extend(yhat[0].tolist())
        temp_input = temp_input[1:]
        lst_output.extend(yhat.tolist())
        i = i + 1
    else:
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print(yhat[0])
        temp_input.extend(yhat[0].tolist())
        print(len(temp_input))
        lst_output.extend(yhat.tolist())
        i = i + 1

print(lst_output)

day_new = np.arange(1, 101)
day_pred = np.arange(101, 131)

import matplotlib.pyplot as plt
len(df1)
plt.plot(day_new, scaler.inverse_transform(df1[1158:]))
plt.plot(day_pred, scaler.inverse_transform(lst_output))

df3 = df1.tolist()
df3.extend(lst_output)
plt.plot(df3[1200:])

df3 = scaler.inverse_transform(df3).tolist()
plt.plot(df3)
```
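To make the sliding-window step concrete, here is a small self-contained check of how `create_dataset` turns a 1-D series into supervised `(X, y)` pairs. The toy series and `time_step=3` are illustrative values only, not taken from the notebook.

```
import numpy as np

def create_dataset(dataset, time_step=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - time_step - 1):
        dataX.append(dataset[i:(i + time_step), 0])
        dataY.append(dataset[i + time_step, 0])
    return np.array(dataX), np.array(dataY)

series = np.arange(10, dtype=float).reshape(-1, 1)   # toy series 0..9 as a column vector
X, y = create_dataset(series, time_step=3)
print(X.shape, y.shape)   # (6, 3) (6,)
print(X[0], y[0])         # [0. 1. 2.] 3.0 -> three past steps predict the next value
```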
github_jupyter
#@title Default title text
import pandas_datareader as pdr

key = ""  # Tiingo API key (left blank here; supply your own)
df = pdr.get_data_tiingo('AAPL', api_key=key)
df.to_csv('AAPL.csv')

import pandas as pd
df = pd.read_csv('AAPL.csv')
df.head()
df.tail()

df1 = df.reset_index()['close']
df1.tail(500)

import matplotlib.pyplot as plt
plt.plot(df1)

### LSTMs are sensitive to the scale of the data, so we apply MinMax scaling
import numpy as np
df1
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
df1 = scaler.fit_transform(np.array(df1).reshape(-1, 1))
print(df1)

## Split the dataset into train and test sets
training_size = int(len(df1) * 0.65)
test_size = len(df1) - training_size
train_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]
training_size, test_size
train_data

import numpy
# convert an array of values into a dataset matrix
def create_dataset(dataset, time_step=1):
    dataX, dataY = [], []
    for i in range(len(dataset) - time_step - 1):
        a = dataset[i:(i + time_step), 0]  # e.g. i=0 -> rows 0..99 as features, row 100 as target
        dataX.append(a)
        dataY.append(dataset[i + time_step, 0])
    return numpy.array(dataX), numpy.array(dataY)

# reshape into X=t,t+1,t+2,t+3 and Y=t+4
time_step = 100
X_train, y_train = create_dataset(train_data, time_step)
X_test, ytest = create_dataset(test_data, time_step)
print(X_train.shape), print(y_train.shape)
print(X_test.shape), print(ytest.shape)

# reshape input to be [samples, time steps, features], which is required for LSTM
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

### Create the stacked LSTM model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM

model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape=(100, 1)))
model.add(LSTM(50, return_sequences=True))
model.add(LSTM(50))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(X_train, y_train, validation_data=(X_test, ytest), epochs=100, batch_size=64, verbose=1)

import tensorflow as tf
tf.__version__

### Let's do the prediction and check performance metrics
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)

## Transform back to the original scale
train_predict = scaler.inverse_transform(train_predict)
test_predict = scaler.inverse_transform(test_predict)

### Calculate RMSE performance metrics
import math
from sklearn.metrics import mean_squared_error
math.sqrt(mean_squared_error(y_train, train_predict))

### Test data RMSE
math.sqrt(mean_squared_error(ytest, test_predict))

### Plotting
# shift train predictions for plotting
look_back = 100
trainPredictPlot = numpy.empty_like(df1)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_predict) + look_back, :] = train_predict
# shift test predictions for plotting
testPredictPlot = numpy.empty_like(df1)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(train_predict) + (look_back * 2) + 1:len(df1) - 1, :] = test_predict
# plot baseline and predictions
plt.plot(scaler.inverse_transform(df1))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()

len(test_data)
x_input = test_data[341:].reshape(1, -1)
x_input.shape

temp_input = list(x_input)
temp_input = temp_input[0].tolist()
temp_input

# demonstrate prediction for the next 30 days
from numpy import array
lst_output = []
n_steps = 100
i = 0
while i < 30:
    if len(temp_input) > 100:
        x_input = np.array(temp_input[1:])
        print("{} day input {}".format(i, x_input))
        x_input = x_input.reshape(1, -1)
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print("{} day output {}".format(i, yhat))
        temp_input.extend(yhat[0].tolist())
        temp_input = temp_input[1:]
        lst_output.extend(yhat.tolist())
        i = i + 1
    else:
        x_input = x_input.reshape((1, n_steps, 1))
        yhat = model.predict(x_input, verbose=0)
        print(yhat[0])
        temp_input.extend(yhat[0].tolist())
        print(len(temp_input))
        lst_output.extend(yhat.tolist())
        i = i + 1

print(lst_output)

day_new = np.arange(1, 101)
day_pred = np.arange(101, 131)

import matplotlib.pyplot as plt
len(df1)
plt.plot(day_new, scaler.inverse_transform(df1[1158:]))
plt.plot(day_pred, scaler.inverse_transform(lst_output))

df3 = df1.tolist()
df3.extend(lst_output)
plt.plot(df3[1200:])

df3 = scaler.inverse_transform(df3).tolist()
plt.plot(df3)
0.391173
0.525491
```
%matplotlib inline
import os
import torch
import torchvision
from torch import nn
from d2l import torch as d2l

# The hot dog dataset is downloaded from the web
d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip',
                          'fba480ffa8aa7e0febbb511d181409f899b9baa5')
data_dir = d2l.download_extract('hotdog')
train_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'))
test_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'))

# The images vary in size and aspect ratio
hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
d2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4)

# Data augmentation
normalize = torchvision.transforms.Normalize([0.485, 0.456, 0.406],
                                             [0.229, 0.224, 0.225])

train_augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomResizedCrop(224),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    normalize
])

test_augs = torchvision.transforms.Compose([
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    normalize
])

# Define and initialize the model
pretrained_net = torchvision.models.resnet18(pretrained=True)
pretrained_net.fc

finetune_net = torchvision.models.resnet18(pretrained=True)
finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2)
nn.init.xavier_uniform_(finetune_net.fc.weight)

# Fine-tune the model
def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5, param_group=True):
    train_iter = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'), transform=train_augs),
        batch_size=batch_size, shuffle=True
    )
    test_iter = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'), transform=test_augs),
        batch_size=batch_size
    )
    devices = d2l.try_all_gpus()
    loss = nn.CrossEntropyLoss(reduction="none")
    if param_group:
        # Train the pretrained body with the base learning rate and the new
        # output layer with a 10x larger learning rate
        params_1x = [
            param for name, param in net.named_parameters()
            if name not in ["fc.weight", "fc.bias"]]
        trainer = torch.optim.SGD([{
            'params': params_1x}, {
            'params': net.fc.parameters(),
            'lr': learning_rate * 10}],
            lr=learning_rate, weight_decay=0.001)
    else:
        trainer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=0.001)
    d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices)

# Use a smaller learning rate for fine-tuning
train_fine_tuning(finetune_net, 5e-5)

# For comparison, train a model whose parameters are all initialized to random values
scratch_net = torchvision.models.resnet18()
scratch_net.fc = nn.Linear(scratch_net.fc.in_features, 2)
train_fine_tuning(scratch_net, 5e-4, param_group=False)

# Freeze all model parameters
for param in finetune_net.parameters():
    param.requires_grad = False

# Weight row of the ImageNet "hot dog" class (index 713) in the pretrained classifier
weight = pretrained_net.fc.weight
hotdog_w = torch.split(weight.data, 1, dim=0)[713]
hotdog_w.shape
```
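The last lines above freeze the network and pull out the weight row of the ImageNet hot dog class (index 713) from the pretrained classifier. One way that row could be reused, shown here purely as a sketch rather than something this notebook does, is to initialize the matching output unit of the new two-class head; the class index of "hotdog" in the new head is an assumption and depends on how `ImageFolder` sorts the folder names.

```
import torch
import torchvision
from torch import nn

# Sketch: reuse the pretrained "hot dog" weight row to initialize the
# positive class of a fresh two-way head.
pretrained_net = torchvision.models.resnet18(pretrained=True)
hotdog_w = torch.split(pretrained_net.fc.weight.data, 1, dim=0)[713]  # shape (1, 512)

finetune_net = torchvision.models.resnet18(pretrained=True)
finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2)
nn.init.xavier_uniform_(finetune_net.fc.weight)

hotdog_class = 0  # assumed: ImageFolder assigns indices by sorted folder name
with torch.no_grad():
    finetune_net.fc.weight[hotdog_class].copy_(hotdog_w.squeeze(0))
```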
github_jupyter
%matplotlib inline import os import torch import torchvision from torch import nn from d2l import torch as d2l # 热狗数据集来源于网络 d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip', 'fba480ffa8aa7e0febbb511d181409f899b9baa5') data_dir = d2l.download_extract('hotdog') train_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train')) test_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test')) # 图像的大小和纵横比各有不同 hotdogs = [train_imgs[i][0] for i in range(8)] not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)] d2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4) # 数据增广 normalize = torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) train_augs = torchvision.transforms.Compose([ torchvision.transforms.RandomResizedCrop(224), torchvision.transforms.RandomHorizontalFlip(), torchvision.transforms.ToTensor(), normalize ]) test_augs = torchvision.transforms.Compose([ torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224), torchvision.transforms.ToTensor(), normalize ]) # 定义和初始化模型 pretrained_net = torchvision.models.resnet18(pretrained=True) pretrained_net.fc finetune_net = torchvision.models.resnet18(pretrained=True) finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2) nn.init.xavier_uniform_(finetune_net.fc.weight) # 微调模型 def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5, param_group=True): train_iter = torch.utils.data.DataLoader( torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'), transform=train_augs), batch_size=batch_size, shuffle=True ) test_iter = torch.utils.data.DataLoader( torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'), transform=test_augs), batch_size=batch_size ) devices = d2l.try_all_gpus() loss = nn.CrossEntropyLoss(reduction="none") if param_group: params_1x = [ param for name, param in net.named_parameters() if name not in ["fc.weight", "fc.bias"]] trainer = torch.optim.SGD([{ 'params': params_1x}, { 'params': net.fc.parameters(), 'lr': learning_rate * 10}], lr=learning_rate, weight_decay=0.001) else: trainer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=0.001) d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs, devices) # 使用较小的学习率 train_fine_tuning(finetune_net, 5e-5) # 为了进行比较, 所有模型参数初始化为随机值 scratch_net = torchvision.models.resnet18() scratch_net.fc = nn.Linear(scratch_net.fc.in_features, 2) train_fine_tuning(scratch_net, 5e-4, param_group=False) for param in finetune_net.parameters(): param.requires_grad = False weight = pretrained_net.fc.weight hotdog_w = torch.split(weight.data, 1, dim=0)[713] hotdog_w.shape
0.480479
0.62865
```
!pip install scikit-learn
!pip install matplotlib
!pip install plotly

### Import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

## overview of the head of the dataset
telco_churn = pd.read_csv('datasets_Telco-Customer-Churn.csv')
telco_churn.head()

## review
telco_churn.shape

## overview of the dataset
telco_churn.dtypes

## Identify missing values
telco_churn.isnull().any()

telco_churn.nunique()

## Separate categorical and numerical values
telco_churn.select_dtypes('object').head()

## Graph
telco_churn.boxplot()

telco_churn.corr()

## Data frame of the total monthly charges compared to the churn
df = telco_churn[['Churn','MonthlyCharges']].groupby(['Churn']).MonthlyCharges.sum().to_frame()
df

## percentage of the monthly charges compared to the churn
df = telco_churn[['Churn','MonthlyCharges']].groupby(['Churn']).MonthlyCharges.sum().to_frame()/df['MonthlyCharges'].sum()
df

## Total numbers of churn
df = telco_churn[['Churn']].groupby(['Churn']).size()
df

## percentage of total numbers of churn
df = telco_churn[['Churn']].groupby(['Churn']).size()/telco_churn['Churn'].count()
df

## Dataframe of the churned
df_churned = telco_churn[telco_churn.Churn == 'Yes']
df_churned.head()

## total sum of monthly charges of the churned
df_churned = telco_churn[telco_churn.Churn == 'Yes']
df_churned.MonthlyCharges.sum()

## Total sum of monthly charges
telco_churn.MonthlyCharges.sum()

## convert TotalCharges to float
telco_churn['TotalCharges'] = pd.to_numeric(telco_churn['TotalCharges'],errors='coerce')

## Total sum of charges
telco_churn.TotalCharges.sum()

## shape of churned data
churn_yes = telco_churn[telco_churn['Churn'] == 'Yes']
churn_yes.shape

## proportion of customers lost and retained
amount_retained = (telco_churn['Churn'] == 'No').mean() * 100
amount_lost = (telco_churn['Churn'] == 'Yes').mean() * 100
sns.countplot(x='Churn',data=telco_churn)
plt.xticks([0,1], ['Retained','Churn'])
plt.xlabel('Condition',size=15, labelpad=12,color='grey')
plt.ylabel('Amount of customers',size=15, labelpad=12,color='grey')
plt.title("Proportion of customers lost and retained", size=15, pad=20)
plt.ylim(0,9000)
plt.text(-0.15,7000,f"{round(amount_retained,2)}%",fontsize=12)
plt.text(0.85,1000,f"{round(amount_lost,2)}%",fontsize=12)
sns.despine()

## comparison of internet service with dependents
churn_yes[['Dependents','InternetService']].groupby(['Dependents','InternetService']).size().to_frame().rename(columns={0:'count'}).reset_index()

## comparison of internet service with gender
churn_yes[['gender','InternetService']].groupby(['gender','InternetService']).size().to_frame().rename(columns={0:'count'}).reset_index()

## comparison of phone service with gender
churn_yes[['gender','PhoneService']].groupby(['gender','PhoneService']).size().to_frame().rename(columns={0:'count'}).reset_index()

## comparison of phone service with dependents
churn_yes[['Dependents','PhoneService']].groupby(['Dependents','PhoneService']).size().to_frame().rename(columns={0:'count'}).reset_index()

## comparison of senior citizen with gender
churn_yes[['gender','SeniorCitizen']].groupby(['gender','SeniorCitizen']).size().to_frame().rename(columns={0:'count'}).reset_index()

## comparison of senior citizen with dependents
churn_yes[['Dependents','SeniorCitizen']].groupby(['Dependents','SeniorCitizen']).size().to_frame().rename(columns={0:'count'}).reset_index()

## representative graph of the six cases
fig,axes = plt.subplots(2,3,figsize = (15,8))
sns.set(style='darkgrid')
ax1 = sns.countplot(x='gender', hue='SeniorCitizen', data=churn_yes, ax=axes[0,0])
ax2 = sns.countplot(x='Dependents', hue='SeniorCitizen', data=churn_yes, ax=axes[1,0])
ax3 = sns.countplot(x='gender', hue='PhoneService', data=churn_yes, ax=axes[0,1])
ax4 = sns.countplot(x='Dependents', hue='PhoneService', data=churn_yes, ax=axes[1,1])
ax5 = sns.countplot(x='gender', hue='InternetService', data=churn_yes, ax=axes[0,2])
ax6 = sns.countplot(x='Dependents', hue='InternetService', data=churn_yes, ax=axes[1,2])

## Columns of type object
col = telco_churn.columns[telco_churn.dtypes == 'object'].tolist()
col

## Columns of type object except customerID and TotalCharges
ignore_col = ['customerID','TotalCharges']
num_cols = [x for x in col if x not in ignore_col]
num_cols

## dataset of the categorical object columns
categorical = telco_churn[num_cols]
categorical.head()

### creation of a dictionary replacing in column gender Female by 1 and Male by 0 in the dataset of the categorical object columns
dict_gender = {'Female':1, 'Male':0}
categorical['gender'] = categorical['gender'].replace(dict_gender)

set(categorical['MultipleLines'])

### creation of a dictionary replacing in column MultipleLines No by 0, No phone service by 1 and Yes by 2 in the dataset of the categorical object columns
dict_mult = {'No':0, 'No phone service':1, 'Yes':2}
categorical['MultipleLines'] = categorical['MultipleLines'].replace(dict_mult)

### creation of a dictionary replacing in column Churn No by 0 and Yes by 1 in the dataset of the categorical object columns
dict_churn = {'Yes':1, 'No':0}
categorical['Churn'] = categorical['Churn'].replace(dict_churn)

### Correlation of categorical values
categorical.corr()

## representative graph
plt.figure(figsize=(10,5))
sns.heatmap(telco_churn.corr(),annot = True, linewidth = 0.6,cmap ='RdBu_r')
plt.show()
```
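The imports at the top (`train_test_split`, `StandardScaler`, `LogisticRegression`) are never used in the cells above. A minimal baseline sketch is given below; the choice of numeric features, the 80/20 split and the default hyperparameters are illustrative assumptions, not part of the original analysis.

```
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Assumed feature subset: numeric columns already used above
features = telco_churn[['MonthlyCharges', 'TotalCharges', 'SeniorCitizen']].copy()
features['TotalCharges'] = features['TotalCharges'].fillna(0)
target = telco_churn['Churn'].map({'Yes': 1, 'No': 0})

X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=42, stratify=target)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("Baseline accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```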
github_jupyter
!pip install scikit-learn !pip install matplotlib !pip install plotly ### Import libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder, StandardScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression ## overview of the head of the dataset telco_churn = pd.read_csv('datasets_Telco-Customer-Churn.csv') telco_churn.head() ## review telco_churn.shape ## overview of the dataset telco_churn.dtypes ## Identify missing value telco_churn.isnull().any() telco_churn.nunique() ## Separate categorical and numerical values telco_churn.select_dtypes('object').head ## Graph telco_churn.boxplot() telco_churn.corr() ## Data frame of the total monthly charges compared to the churn df = telco_churn[['Churn','MonthlyCharges']].groupby(['Churn']).MonthlyCharges.sum().to_frame() df ## percentage of the monthly charges compared to the churn df = telco_churn[['Churn','MonthlyCharges']].groupby(['Churn']).MonthlyCharges.sum().to_frame()/df['MonthlyCharges'].sum() df ## Total numbers of churn df = telco_churn[['Churn']].groupby(['Churn']).size() df ## percentage of total numbers of churn df = telco_churn[['Churn']].groupby(['Churn']).size()/telco_churn['Churn'].count() df ## Dataframe of the churned df_churned = telco_churn[telco_churn.Churn == 'Yes'] df_churned.head() ## total sum of monthly charges of the churn df_churned = telco_churn[telco_churn.Churn == 'Yes'] df_churned.MonthlyCharges.sum() ## Total sum of monthly charges telco_churn.MonthlyCharges.sum() ## convert totalCharges in Float telco_churn['TotalCharges'] = pd.to_numeric(telco_churn['TotalCharges'],errors='coerce') ## Total sum of charges telco_churn.TotalCharges.sum() ## shape of churned data churn_yes = telco_churn[telco_churn['Churn'] == 'Yes'] churn_yes.shape ## proportion of customers lost and retained sns.countlot(x='Churn',data=telco_churn) plt.xticks([0,1], ['Retained','Churn']) plt.xlabel('Condition',size=15, labelpad=12,color='grey') plt.ylabel('Amount of customers',size=15, labelpad=12,color='grey') plt.title("Proportion of customers lost and retained", size=15, pad=20) plt.ylim(0,9000) plt.text(-0.15,7000,f"{round(amount_retained,2)}%",fontsize=12) plt.text(-0.85,1000,f"{round(amount_lost,2)}%",fontsize=12) sns.despine() ## comparison of internet service with dependents churn_yes[['Dependents','InternetService']].groupby(['Dependents','InternetService']).size().to_frame().rename(columns={0:'count'}).reset_index() ## comparison of internet service with gender churn_yes[['gender','InternetService']].groupby(['gender','InternetService']).size().to_frame().rename(columns={0:'count'}).reset_index() ## comparison of phone service with gender churn_yes[['gender','PhoneService']].groupby(['gender','PhoneService']).size().to_frame().rename(columns={0:'count'}).reset_index() ## comparison of phone service with dependents churn_yes[['Dependents','PhoneService']].groupby(['Dependents','PhoneService']).size().to_frame().rename(columns={0:'count'}).reset_index() ## comparison of senior citizen with gender churn_yes[['gender','SeniorCitizen']].groupby(['gender','SeniorCitizen']).size().to_frame().rename(columns={0:'count'}).reset_index() ## comparison of senior citizen with dependents churn_yes[['Dependents','SeniorCitizen']].groupby(['Dependents','SeniorCitizen']).size().to_frame().rename(columns={0:'count'}).reset_index() ## representative graph of the six cases fig,axes = plt.subplots(2,3,figsize = 
(15,8)) sns.set(style='darkgrid') ax1 = sns.countplot(x='gender', hue='SeniorCitizen', data=churn_yes, ax=axes[0,0]) ax2 = sns.countplot(x='Dependents', hue='SeniorCitizen', data=churn_yes, ax=axes[1,0]) ax3 = sns.countplot(x='gender', hue='PhoneService', data=churn_yes, ax=axes[0,1]) ax4 = sns.countplot(x='Dependents', hue='PhoneService', data=churn_yes, ax=axes[1,1]) ax5 = sns.countplot(x='gender', hue='InternetService', data=churn_yes, ax=axes[0,2]) ax6 = sns.countplot(x='Dependents', hue='InternetService', data=churn_yes, ax=axes[1,2]) ## Number of columns of type object col=telco_churn.columns[telco_churn.columns.dtype == 'object'] col = col[0] col ## Number of columns of type object except customerID and TotalCharges ignore_col = ['customerID','TotalCharges'] num_cols = [x for x in col if x not in ignore_col] num_cols ## dataset of the categorical object categorical = telco_churn[num_cols] categorical.head() ### creation of a dictionary replacing in column gender female by 1 and male by 0 in the dataset of the categorical object dict_gender = {'Female':1, 'Male':0} categorical['gender'] = categorical['gender'].replace(dict_gender) set(categorical['MultipleLines']) ### creation of a dictionary replacing in column MultipleLines No by 0,No phone service by 1 and Yes by 2 in the dataset of the categorical object dict_mult = {'No':0, 'No phone service':1, 'Yes':2} categorical['MultipleLines'] = categorical['MultipleLines'].replace(dict_mult) ### creation of a dictionary replacing in column Churn No by 0 and Yes by 1 in the dataset of the categorical object dict_churn = {'Yes':1, 'No':0} categorical['Churn'] = categorical['Churn'].replace(dict_churn) ### Correlation of categorical values categorical.corr() ## representative graph plt.figure(figsize=(10,5)) sns.heatmap(telco_churn.corr(),annot = True, linewidth = 0.6,cmap ='RdBu_r') plt.show()
0.730097
0.645546
# Deep Learning ### Neural Networks A neural network is composed of layers, each containing neurons or units as they are called nowadays, see image. The goal in Deep Learning is to create powerful models that learn features in the input data. A neural network is a function composition of layers. ``` from IPython.display import Image Image(filename="complete_cnn.jpeg") ``` ## Convolutional Neural Networks In computer vision convolutional neural networks are used for example in image classification, object detection, image captioning and semantic segmentation among other things. The principal components in an CNN are: * convolution layers * max or average pooling * full connected layers ### Convolutional layer ``` Image(filename="convolution.png") import tensorflow as tf from functools import partial from tensorflow import keras import numpy as np tf.random.set_seed(42) ``` Download data from keras. Use the cifar 10. ``` fashion_mnist = keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() num_classes = 10 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) x_train.shape, x_train[0, :, :].max(), x_train[0, :, :].min() x_train, x_test = x_train/255.0, x_test/255.0 ``` # Create model ``` def exponential_decay(lr0, s): def exponential_decay_fn(epoch): return lr0 * 0.1 ** (epoch/s) return exponential_decay_fn def create_cnn(activation='relu', padding='same', input_shape=[28, 28, 1], output_dim=10): """Create convolutional neural network""" model = tf.keras.Sequential() partial_cnn = partial(tf.keras.layers.Conv2D, activation=activation, padding=padding) model.add(partial_cnn(64, 7, input_shape=input_shape)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(partial_cnn(128, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(partial_cnn(128, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(partial_cnn(256, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(partial_cnn(256, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(output_dim, activation='softmax')) optimizer = tf.keras.optimizers.Nadam() model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model model = create_cnn() model.summary() x_train.shape, y_train.shape ``` # Train Model ``` exponential_decay_fn = exponential_decay(lr0=0.1, s=10) exp_schedule = tf.keras.callbacks.LearningRateScheduler(exponential_decay_fn) early_stopping = tf.keras.callbacks.EarlyStopping(patience=10) callbacks = [early_stopping, exp_schedule] history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, batch_size=32, # callbacks=callbacks ) import pandas as pd import matplotlib.pyplot as plt pd.DataFrame(history.history).plot(figsize=(8, 5), grid=True) plt.gca().set_ylim(0, 1) plt.show(); ``` ### Exercise Replace batch-normalization with the SELU activation function. Is the performance better? 
* Normalize the input * Use LeCun normal initialization * Make sure that the DNN contains only a sequence of dense layers
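One possible starting point for this exercise is sketched below, under the assumptions listed above (standardized inputs, LeCun-normal initialization, dense layers only), reusing the `x_train`/`y_train` arrays already prepared in this notebook.

```
import tensorflow as tf

# Standardize the inputs: SELU's self-normalizing guarantees assume roughly
# zero-mean, unit-variance inputs and lecun_normal-initialized dense layers.
mean, std = x_train.mean(), x_train.std()
x_train_std = (x_train - mean) / std
x_test_std = (x_test - mean) / std

def create_selu_dnn(input_shape=[28, 28, 1], output_dim=10, n_hidden=4, width=128):
    """Plain dense network with SELU activations instead of batch normalization."""
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Flatten(input_shape=input_shape))
    for _ in range(n_hidden):
        model.add(tf.keras.layers.Dense(width, activation='selu',
                                        kernel_initializer='lecun_normal'))
    model.add(tf.keras.layers.Dense(output_dim, activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.Nadam(),
                  loss='categorical_crossentropy', metrics=['accuracy'])
    return model

selu_model = create_selu_dnn()
# history = selu_model.fit(x_train_std, y_train, validation_data=(x_test_std, y_test),
#                          epochs=10, batch_size=32)
```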
github_jupyter
from IPython.display import Image Image(filename="complete_cnn.jpeg") Image(filename="convolution.png") import tensorflow as tf from functools import partial from tensorflow import keras import numpy as np tf.random.set_seed(42) fashion_mnist = keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() num_classes = 10 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) x_train.shape, x_train[0, :, :].max(), x_train[0, :, :].min() x_train, x_test = x_train/255.0, x_test/255.0 def exponential_decay(lr0, s): def exponential_decay_fn(epoch): return lr0 * 0.1 ** (epoch/s) return exponential_decay_fn def create_cnn(activation='relu', padding='same', input_shape=[28, 28, 1], output_dim=10): """Create convolutional neural network""" model = tf.keras.Sequential() partial_cnn = partial(tf.keras.layers.Conv2D, activation=activation, padding=padding) model.add(partial_cnn(64, 7, input_shape=input_shape)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(partial_cnn(128, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(partial_cnn(128, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(partial_cnn(256, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(partial_cnn(256, 3)) model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.MaxPooling2D(2)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(output_dim, activation='softmax')) optimizer = tf.keras.optimizers.Nadam() model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) return model model = create_cnn() model.summary() x_train.shape, y_train.shape exponential_decay_fn = exponential_decay(lr0=0.1, s=10) exp_schedule = tf.keras.callbacks.LearningRateScheduler(exponential_decay_fn) early_stopping = tf.keras.callbacks.EarlyStopping(patience=10) callbacks = [early_stopping, exp_schedule] history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=100, batch_size=32, # callbacks=callbacks ) import pandas as pd import matplotlib.pyplot as plt pd.DataFrame(history.history).plot(figsize=(8, 5), grid=True) plt.gca().set_ylim(0, 1) plt.show();
0.900418
0.980091
PWC-Net-small model training (with cyclical learning rate schedule)
=======================================================

In this notebook we:
- Use a small model (no dense or residual connections), 6 level pyramid, upsample level 2 by 4 as the final flow prediction
- Train the PWC-Net-small model on a mix of the `FlyingChairs` and `FlyingThings3DHalfRes` dataset using a Cyclic<sub>short</sub> schedule of our own
- The Cyclic<sub>short</sub> schedule oscillates between `5e-04` and `1e-05` for 200,000 steps

Below, look for `TODO` references and customize this notebook based on your own needs.

## Reference

[2018a]<a name="2018a"></a> Sun et al. 2018. PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume. [[arXiv]](https://arxiv.org/abs/1709.02371) [[web]](http://research.nvidia.com/publication/2018-02_PWC-Net%3A-CNNs-for) [[PyTorch (Official)]](https://github.com/NVlabs/PWC-Net/tree/master/PyTorch) [[Caffe (Official)]](https://github.com/NVlabs/PWC-Net/tree/master/Caffe)

```
"""
pwcnet_train.ipynb

PWC-Net model training.

Written by Phil Ferriere

Licensed under the MIT License (see LICENSE for details)

Tensorboard:
    [win] tensorboard --logdir=E:\\repos\\tf-optflow\\tfoptflow\\pwcnet-sm-6-2-cyclic-chairsthingsmix
    [ubu] tensorboard --logdir=/media/EDrive/repos/tf-optflow/tfoptflow/pwcnet-sm-6-2-cyclic-chairsthingsmix
"""
from __future__ import absolute_import, division, print_function
import sys
from copy import deepcopy

from dataset_base import _DEFAULT_DS_TRAIN_OPTIONS
from dataset_flyingchairs import FlyingChairsDataset
from dataset_flyingthings3d import FlyingThings3DHalfResDataset
from dataset_mixer import MixedDataset
from model_pwcnet import ModelPWCNet, _DEFAULT_PWCNET_TRAIN_OPTIONS
```

## TODO: Set this first!

```
# TODO: You MUST set dataset_root to the correct path on your machine!
if sys.platform.startswith("win"): _DATASET_ROOT = 'E:/datasets/' else: _DATASET_ROOT = '/media/EDrive/datasets/' _FLYINGCHAIRS_ROOT = _DATASET_ROOT + 'FlyingChairs_release' _FLYINGTHINGS3DHALFRES_ROOT = _DATASET_ROOT + 'FlyingThings3D_HalfRes' # TODO: You MUST adjust the settings below based on the number of GPU(s) used for training # Set controller device and devices # A one-gpu setup would be something like controller='/device:GPU:0' and gpu_devices=['/device:GPU:0'] # Here, we use a dual-GPU setup, as shown below gpu_devices = ['/device:GPU:0', '/device:GPU:1'] controller = '/device:CPU:0' # TODO: You MUST adjust this setting below based on the amount of memory on your GPU(s) # Batch size batch_size = 8 ``` # Pre-train on `FlyingChairs+FlyingThings3DHalfRes` mix ## Load the dataset ``` # TODO: You MUST set the batch size based on the capabilities of your GPU(s) # Load train dataset ds_opts = deepcopy(_DEFAULT_DS_TRAIN_OPTIONS) ds_opts['in_memory'] = False # Too many samples to keep in memory at once, so don't preload them ds_opts['aug_type'] = 'heavy' # Apply all supported augmentations ds_opts['batch_size'] = batch_size * len(gpu_devices) # Use a multiple of 8; here, 16 for dual-GPU mode (Titan X & 1080 Ti) ds_opts['crop_preproc'] = (256, 448) # Crop to a smaller input size ds1 = FlyingChairsDataset(mode='train_with_val', ds_root=_FLYINGCHAIRS_ROOT, options=ds_opts) ds_opts['type'] = 'into_future' ds2 = FlyingThings3DHalfResDataset(mode='train_with_val', ds_root=_FLYINGTHINGS3DHALFRES_ROOT, options=ds_opts) ds = MixedDataset(mode='train_with_val', datasets=[ds1, ds2], options=ds_opts) # Display dataset configuration ds.print_config() ``` ## Configure the training ``` # Start from the default options nn_opts = deepcopy(_DEFAULT_PWCNET_TRAIN_OPTIONS) nn_opts['verbose'] = True nn_opts['ckpt_dir'] = './pwcnet-sm-6-2-cyclic-chairsthingsmix/' nn_opts['batch_size'] = ds_opts['batch_size'] nn_opts['x_shape'] = [2, ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 3] nn_opts['y_shape'] = [ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 2] nn_opts['use_tf_data'] = True # Use tf.data reader nn_opts['gpu_devices'] = gpu_devices nn_opts['controller'] = controller # Use the PWC-Net-small model in quarter-resolution mode nn_opts['use_dense_cx'] = False nn_opts['use_res_cx'] = False nn_opts['pyr_lvls'] = 6 nn_opts['flow_pred_lvl'] = 2 # Set the learning rate schedule. This schedule is for a single GPU using a batch size of 8. # Below,we adjust the schedule to the size of the batch and the number of GPUs. nn_opts['lr_policy'] = 'cyclic' nn_opts['cyclic_lr_max'] = 5e-04 # Anything higher will generate NaNs nn_opts['cyclic_lr_base'] = 1e-05 nn_opts['cyclic_lr_stepsize'] = 20000 nn_opts['max_steps'] = 200000 # Below,we adjust the schedule to the size of the batch and our number of GPUs (2). 
nn_opts['max_steps'] = int(nn_opts['max_steps'] * 8 / ds_opts['batch_size']) nn_opts['cyclic_lr_stepsize'] = int(nn_opts['cyclic_lr_stepsize'] * 8 / ds_opts['batch_size']) # Instantiate the model and display the model configuration nn = ModelPWCNet(mode='train_with_val', options=nn_opts, dataset=ds) nn.print_config() ``` ## Train the model ``` # Train the model nn.train() ``` ## Training log Here are the training curves for the run above: ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/loss.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/epe.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/lr.png) Here are the predictions issued by the model for a few validation samples: ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val1.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val2.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val3.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val4.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val5.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val6.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val7.png) ![](img/pwcnet-sm-6-2-cyclic-chairsthingsmix/val8.png)
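For reference, the Cyclic<sub>short</sub> policy described at the top of this notebook (base `1e-05`, max `5e-04`, step size 20,000) corresponds to a triangular waveform. The sketch below only illustrates that shape; the schedule actually used for training is driven by the `lr_policy` options passed to `ModelPWCNet`.

```
def cyclic_lr(step, base_lr=1e-05, max_lr=5e-04, stepsize=20000):
    """Triangular cyclic learning rate: rises from base_lr to max_lr over
    `stepsize` steps, then falls back over the next `stepsize` steps."""
    cycle = step // (2 * stepsize)
    x = abs(step / stepsize - 2 * cycle - 1)   # position within the cycle, in [0, 1]
    return base_lr + (max_lr - base_lr) * max(0.0, 1.0 - x)

# Learning rate at a few points of the first cycle
for s in (0, 10000, 20000, 30000, 40000):
    print(s, cyclic_lr(s))
```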
github_jupyter
""" pwcnet_train.ipynb PWC-Net model training. Written by Phil Ferriere Licensed under the MIT License (see LICENSE for details) Tensorboard: [win] tensorboard --logdir=E:\\repos\\tf-optflow\\tfoptflow\\pwcnet-sm-6-2-cyclic-chairsthingsmix [ubu] tensorboard --logdir=/media/EDrive/repos/tf-optflow/tfoptflow/pwcnet-sm-6-2-cyclic-chairsthingsmix """ from __future__ import absolute_import, division, print_function import sys from copy import deepcopy from dataset_base import _DEFAULT_DS_TRAIN_OPTIONS from dataset_flyingchairs import FlyingChairsDataset from dataset_flyingthings3d import FlyingThings3DHalfResDataset from dataset_mixer import MixedDataset from model_pwcnet import ModelPWCNet, _DEFAULT_PWCNET_TRAIN_OPTIONS # TODO: You MUST set dataset_root to the correct path on your machine! if sys.platform.startswith("win"): _DATASET_ROOT = 'E:/datasets/' else: _DATASET_ROOT = '/media/EDrive/datasets/' _FLYINGCHAIRS_ROOT = _DATASET_ROOT + 'FlyingChairs_release' _FLYINGTHINGS3DHALFRES_ROOT = _DATASET_ROOT + 'FlyingThings3D_HalfRes' # TODO: You MUST adjust the settings below based on the number of GPU(s) used for training # Set controller device and devices # A one-gpu setup would be something like controller='/device:GPU:0' and gpu_devices=['/device:GPU:0'] # Here, we use a dual-GPU setup, as shown below gpu_devices = ['/device:GPU:0', '/device:GPU:1'] controller = '/device:CPU:0' # TODO: You MUST adjust this setting below based on the amount of memory on your GPU(s) # Batch size batch_size = 8 # TODO: You MUST set the batch size based on the capabilities of your GPU(s) # Load train dataset ds_opts = deepcopy(_DEFAULT_DS_TRAIN_OPTIONS) ds_opts['in_memory'] = False # Too many samples to keep in memory at once, so don't preload them ds_opts['aug_type'] = 'heavy' # Apply all supported augmentations ds_opts['batch_size'] = batch_size * len(gpu_devices) # Use a multiple of 8; here, 16 for dual-GPU mode (Titan X & 1080 Ti) ds_opts['crop_preproc'] = (256, 448) # Crop to a smaller input size ds1 = FlyingChairsDataset(mode='train_with_val', ds_root=_FLYINGCHAIRS_ROOT, options=ds_opts) ds_opts['type'] = 'into_future' ds2 = FlyingThings3DHalfResDataset(mode='train_with_val', ds_root=_FLYINGTHINGS3DHALFRES_ROOT, options=ds_opts) ds = MixedDataset(mode='train_with_val', datasets=[ds1, ds2], options=ds_opts) # Display dataset configuration ds.print_config() # Start from the default options nn_opts = deepcopy(_DEFAULT_PWCNET_TRAIN_OPTIONS) nn_opts['verbose'] = True nn_opts['ckpt_dir'] = './pwcnet-sm-6-2-cyclic-chairsthingsmix/' nn_opts['batch_size'] = ds_opts['batch_size'] nn_opts['x_shape'] = [2, ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 3] nn_opts['y_shape'] = [ds_opts['crop_preproc'][0], ds_opts['crop_preproc'][1], 2] nn_opts['use_tf_data'] = True # Use tf.data reader nn_opts['gpu_devices'] = gpu_devices nn_opts['controller'] = controller # Use the PWC-Net-small model in quarter-resolution mode nn_opts['use_dense_cx'] = False nn_opts['use_res_cx'] = False nn_opts['pyr_lvls'] = 6 nn_opts['flow_pred_lvl'] = 2 # Set the learning rate schedule. This schedule is for a single GPU using a batch size of 8. # Below,we adjust the schedule to the size of the batch and the number of GPUs. nn_opts['lr_policy'] = 'cyclic' nn_opts['cyclic_lr_max'] = 5e-04 # Anything higher will generate NaNs nn_opts['cyclic_lr_base'] = 1e-05 nn_opts['cyclic_lr_stepsize'] = 20000 nn_opts['max_steps'] = 200000 # Below,we adjust the schedule to the size of the batch and our number of GPUs (2). 
nn_opts['max_steps'] = int(nn_opts['max_steps'] * 8 / ds_opts['batch_size']) nn_opts['cyclic_lr_stepsize'] = int(nn_opts['cyclic_lr_stepsize'] * 8 / ds_opts['batch_size']) # Instantiate the model and display the model configuration nn = ModelPWCNet(mode='train_with_val', options=nn_opts, dataset=ds) nn.print_config() # Train the model nn.train()
0.490724
0.876264
## Multi-label prediction with Planet Amazon dataset ``` %reload_ext autoreload %autoreload 2 %matplotlib inline from fastai import * from fastai.vision import * ``` ## Getting the data The planet dataset isn't available on the [fastai dataset page](https://course.fast.ai/datasets) due to copyright restrictions. You can download it from Kaggle however. Let's see how to do this by using the [Kaggle API](https://github.com/Kaggle/kaggle-api) as it's going to be pretty useful to you if you want to join a competition or use other Kaggle datasets later on. First, install the Kaggle API by uncommenting the following line and executing it, or by executing it in your terminal (depending on your platform you may need to modify this slightly to either add `source activate fastai` or similar, or prefix `pip` with a path. Have a look at how `conda install` is called for your platform in the appropriate *Returning to work* section of https://course-v3.fast.ai/. (Depending on your environment, you may also need to append "--user" to the command.) ``` # ! pip install kaggle --upgrade ``` Then you need to upload your credentials from Kaggle on your instance. Login to kaggle and click on your profile picture on the top left corner, then 'My account'. Scroll down until you find a button named 'Create New API Token' and click on it. This will trigger the download of a file named 'kaggle.json'. Upload this file to the directory this notebook is running in, by clicking "Upload" on your main Jupyter page, then uncomment and execute the next two commands (or run them in a terminal). ``` #! mkdir -p ~/.kaggle/ #! mv kaggle.json ~/.kaggle/ ``` You're all set to download the data from [planet competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space). You **first need to go to its main page and accept its rules**, and run the two cells below (uncomment the shell commands to download and unzip the data). If you get a `403 forbidden` error it means you haven't accepted the competition rules yet (you have to go to the competition page, click on *Rules* tab, and then scroll to the bottom to find the *accept* button). ``` path = Config.data_path()/'planet' path.mkdir(exist_ok=True) path # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path} # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path} # ! unzip -q -n {path}/train_v2.csv.zip -d {path} ``` To extract the content of this file, we'll need 7zip, so uncomment the following line if you need to install it (or run `sudo apt install p7zip` in your terminal). ``` # ! conda install -y -c haasad eidl7zip ``` And now we can unpack the data (uncomment to run - this might take a few minutes to complete). ``` # ! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path} ``` ## Multiclassification Contrary to the pets dataset studied in last lesson, here each picture can have multiple labels. If we take a look at the csv file containing the labels (in 'train_v2.csv' here) we see that each 'image_name' is associated to several tags separated by spaces. ``` df = pd.read_csv(path/'train_v2.csv') df.head() ``` To put this in a `DataBunch` while using the [data block API](https://docs.fast.ai/data_block.html), we then need to using `ImageMultiDataset` (and not `ImageClassificationDataset`). This will make sure the model created has the proper loss function to deal with the multiple classes. 
``` tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.) ``` We use parentheses around the data block pipeline below, so that we can use a multiline statement without needing to add '\\'. ``` np.random.seed(42) src = (ImageFileList.from_folder(path) .label_from_csv('train_v2.csv', sep=' ', folder='train-jpg', suffix='.jpg') .random_split_by_pct(0.2)) data = (src.datasets() .transform(tfms, size=128) .databunch().normalize(imagenet_stats)) ``` `show_batch` still works, and show us the different labels separated by `;`. ``` data.show_batch(rows=3, figsize=(10,9)) ``` To create a `Learner` we use the same function as in lesson 1. Our base architecture is resnet34 again, but the metrics are a little bit differeent: we use `accuracy_thresh` instead of `accuracy`. In lesson 1, we determined the predicition for a given class by picking the final activation that was the biggest, but here, each activation can be 0. or 1. `accuracy_thresh` selects the ones that are above a certain threshold (0.5 by default) and compares them to the ground truth. As for Fbeta, it's the metric that was used by Kaggle on this competition. See [here](https://en.wikipedia.org/wiki/F1_score) for more details. ``` arch = models.resnet50 acc_02 = partial(accuracy_thresh, thresh=0.2) f_score = partial(fbeta, thresh=0.2) learn = create_cnn(data, arch, metrics=[acc_02, f_score]) ``` We use the LR Finder to pick a good learning rate. ``` learn.lr_find() learn.recorder.plot() ``` Then we can fit the head of our network. ``` lr = 0.01 learn.fit_one_cycle(5, slice(lr)) learn.save('stage-1-rn50') ``` ...And fine-tune the whole model: ``` learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(5, slice(1e-5, lr/5)) learn.save('stage-2-rn50') data = (src.datasets(ImageMultiDataset) .transform(tfms, size=256) .databunch().normalize(imagenet_stats)) learn.data = data data.train_ds[0][0].shape learn.freeze() learn.lr_find() learn.recorder.plot() lr=1e-2/2 learn.fit_one_cycle(5, slice(lr)) learn.save('stage-1-256-rn50') learn.unfreeze() learn.fit_one_cycle(5, slice(1e-5, lr/5)) learn.recorder.plot_losses() learn.save('stage-2-256-rn50') ``` You won't really know how you're going until you submit to Kaggle, since the leaderboard isn't using the same subset as we have for training. But as a guide, 50th place (out of 938 teams) on the private leaderboard was a score of `0.930`. ## fin (We'll look at this section later - please don't ask about it just yet! :) ) ``` # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path} # ! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path} learn.load('stage-2-256-rn50') learn.data = (src.add_test_folder('test-jpg') .datasets(ImageMultiDataset) .transform(tfms, size=256) .databunch().normalize(imagenet_stats)) ```
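To make the thresholded metrics used earlier in this notebook more concrete, here is a small standalone sketch of both ideas. It is illustrative only; the notebook itself relies on the fastai implementations `accuracy_thresh` and `fbeta`.

```
import torch

def accuracy_thresh_demo(preds, targets, thresh=0.2):
    """Activations above `thresh` (after a sigmoid) count as predicted labels."""
    return ((preds.sigmoid() > thresh) == (targets > 0.5)).float().mean()

def fbeta_demo(preds, targets, thresh=0.2, beta=2, eps=1e-9):
    """Per-sample F-beta on thresholded sigmoid activations, averaged over the batch."""
    pred_pos = (preds.sigmoid() > thresh).float()
    tp = (pred_pos * targets).sum(dim=1)
    precision = tp / (pred_pos.sum(dim=1) + eps)
    recall = tp / (targets.sum(dim=1) + eps)
    beta2 = beta ** 2
    fbeta = (1 + beta2) * precision * recall / (beta2 * precision + recall + eps)
    return fbeta.mean()

# Toy check on random activations for 17 planet-style labels
preds = torch.randn(4, 17)
targets = (torch.rand(4, 17) > 0.8).float()
print(accuracy_thresh_demo(preds, targets), fbeta_demo(preds, targets))
```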
github_jupyter
%reload_ext autoreload %autoreload 2 %matplotlib inline from fastai import * from fastai.vision import * # ! pip install kaggle --upgrade #! mkdir -p ~/.kaggle/ #! mv kaggle.json ~/.kaggle/ path = Config.data_path()/'planet' path.mkdir(exist_ok=True) path # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train-jpg.tar.7z -p {path} # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f train_v2.csv -p {path} # ! unzip -q -n {path}/train_v2.csv.zip -d {path} # ! conda install -y -c haasad eidl7zip # ! 7za -bd -y -so x {path}/train-jpg.tar.7z | tar xf - -C {path} df = pd.read_csv(path/'train_v2.csv') df.head() tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.) np.random.seed(42) src = (ImageFileList.from_folder(path) .label_from_csv('train_v2.csv', sep=' ', folder='train-jpg', suffix='.jpg') .random_split_by_pct(0.2)) data = (src.datasets() .transform(tfms, size=128) .databunch().normalize(imagenet_stats)) data.show_batch(rows=3, figsize=(10,9)) arch = models.resnet50 acc_02 = partial(accuracy_thresh, thresh=0.2) f_score = partial(fbeta, thresh=0.2) learn = create_cnn(data, arch, metrics=[acc_02, f_score]) learn.lr_find() learn.recorder.plot() lr = 0.01 learn.fit_one_cycle(5, slice(lr)) learn.save('stage-1-rn50') learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(5, slice(1e-5, lr/5)) learn.save('stage-2-rn50') data = (src.datasets(ImageMultiDataset) .transform(tfms, size=256) .databunch().normalize(imagenet_stats)) learn.data = data data.train_ds[0][0].shape learn.freeze() learn.lr_find() learn.recorder.plot() lr=1e-2/2 learn.fit_one_cycle(5, slice(lr)) learn.save('stage-1-256-rn50') learn.unfreeze() learn.fit_one_cycle(5, slice(1e-5, lr/5)) learn.recorder.plot_losses() learn.save('stage-2-256-rn50') # ! kaggle competitions download -c planet-understanding-the-amazon-from-space -f test-jpg.tar.7z -p {path} # ! 7za -bd -y -so x {path}/test-jpg.tar.7z | tar xf - -C {path} learn.load('stage-2-256-rn50') learn.data = (src.add_test_folder('test-jpg') .datasets(ImageMultiDataset) .transform(tfms, size=256) .databunch().normalize(imagenet_stats))
0.356335
0.914634
## Integration over Polytopes #### Extra dependencies : matplotlib (if using methods : plot_polytope and plot_polynomial) ``` from sympy import sqrt from sympy.abc import x, y, z from sympy.geometry import * from sympy.integrals.intpoly import * ``` ## Methods : ### polytope_integrate(poly, expr, **kwargs) Integrates polynomials over 2/3-Polytopes. This function accepts the polytope in `poly` and the function in `expr` (uni/bi/trivariate polynomials are implemented) and returns the exact integral of `expr` over `poly`. Parameters --------------------------------------- 1. poly(Polygon) : 2/3-Polytope 2. expr(SymPy expression) : uni/bi-variate polynomial for 2-Polytope and uni/bi/tri-variate for 3-Polytope Optional Parameters --------------------------------------- 1. clockwise(Boolean) : If user is not sure about orientation of vertices of the 2-Polytope and wants to clockwise sort the points. 2. max_degree(Integer) : Maximum degree of any monomial of the input polynomial. This would require #### Examples : ``` triangle = Polygon(Point(0,0), Point(1,1), Point(1,0)) plot_polytope(triangle) print("Area of Triangle with vertices : (0,0), (1,1), (1,0) : ", polytope_integrate(triangle, 1)) print("x*y integrated over Triangle with vertices : (0,0), (1,1), (1,0) : ", polytope_integrate(triangle, x*y),"\n") hexagon = Polygon(Point(0, 0), Point(-sqrt(3) / 2, 0.5), Point(-sqrt(3) / 2, 3 / 2), Point(0, 2), Point(sqrt(3) / 2, 3 / 2), Point(sqrt(3) / 2, 0.5)) plot_polytope(hexagon) print("Area of regular hexagon with unit side length : ", polytope_integrate(hexagon, 1)) print("x + y**2 integrated over regular hexagon with unit side length : ", polytope_integrate(hexagon, x + y**2)) polys = [1, x, y, x*y] print("1, x, y, x*y integrated over hexagon : ", polytope_integrate(hexagon, polys, max_degree=2)) ``` ### main_integrate3d(expr, facets, vertices, hp_params) Function to translate the problem of integrating uni/bi/tri-variate polynomials over a 3-Polytope to integrating over its faces. This is done using Generalized Stokes's Theorem and Euler's Theorem. Parameters ------------------ 1. expr : The input polynomial 2. facets : Faces of the 3-Polytope(expressed as indices of `vertices`) 3. vertices : Vertices that constitute the Polytope 4. hp_params : Hyperplane Parameters of the facets #### Examples: ``` cube = [[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0], [3, 1, 0, 2], [0, 4, 6, 2]] vertices = cube[0] faces = cube[1:] hp_params = hyperplane_parameters(faces, vertices) main_integrate3d(1, faces, vertices, hp_params) ``` ### polygon_integrate(facet, index, facets, vertices, expr, degree) Helper function to integrate the input uni/bi/trivariate polynomial over a certain face of the 3-Polytope. 
Parameters ------------------ facet : Particular face of the 3-Polytope over which `expr` is integrated index : The index of `facet` in `facets` facets : Faces of the 3-Polytope(expressed as indices of `vertices`) vertices : Vertices that constitute the facet expr : The input polynomial degree : Degree of `expr` #### Examples: ``` cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0), (5, 0, 5), (5, 5, 0), (5, 5, 5)], [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0], [3, 1, 0, 2], [0, 4, 6, 2]] facet = cube[1] facets = cube[1:] vertices = cube[0] print("Area of polygon < [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] > : ", polygon_integrate(facet, 0, facets, vertices, 1, 0)) ``` ### distance_to_side(point, line_seg) Helper function to compute the distance between given 3D point and a line segment. Parameters ----------------- point : 3D Point line_seg : Line Segment #### Examples: ``` point = (0, 0, 0) distance_to_side(point, [(0, 0, 1), (0, 1, 0)]) ``` ### lineseg_integrate(polygon, index, line_seg, expr, degree) Helper function to compute the line integral of `expr` over `line_seg` Parameters ------------- polygon : Face of a 3-Polytope index : index of line_seg in polygon line_seg : Line Segment #### Examples : ``` polygon = [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] line_seg = [(0, 5, 0), (5, 5, 0)] print(lineseg_integrate(polygon, 0, line_seg, 1, 0)) ``` ### main_integrate(expr, facets, hp_params, max_degree=None) Function to translate the problem of integrating univariate/bivariate polynomials over a 2-Polytope to integrating over its boundary facets. This is done using Generalized Stokes's Theorem and Euler's Theorem. Parameters -------------------- expr : The input polynomial facets : Facets(Line Segments) of the 2-Polytope hp_params : Hyperplane Parameters of the facets Optional Parameters: -------------------- max_degree : The maximum degree of any monomial of the input polynomial. #### Examples: ``` triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) facets = triangle.sides hp_params = hyperplane_parameters(triangle) print(main_integrate(x**2 + y**2, facets, hp_params)) ``` ### integration_reduction(facets, index, a, b, expr, dims, degree) This is a helper function for polytope_integrate. It relates the result of the integral of a polynomial over a d-dimensional entity to the result of the same integral of that polynomial over the (d - 1)-dimensional facet[index]. For the 2D case, surface integral --> line integrals --> evaluation of polynomial at vertices of line segments For the 3D case, volume integral --> 2D use case The only minor limitation is that some lines of code are 2D specific, but that can be easily changed. Note that this function is a helper one and works for a facet which bounds the polytope(i.e. the intersection point with the other facets is required), not for an independent line. Parameters ------------------ facets : List of facets that decide the region enclose by 2-Polytope. index : The index of the facet with respect to which the integral is supposed to be found. a, b : Hyperplane parameters corresponding to facets. 
expr : Uni/Bi-variate Polynomial dims : List of symbols denoting axes degree : Degree of the homogeneous polynoimal(expr) #### Examples: ``` facets = [Segment2D(Point(0, 0), Point(1, 1)), Segment2D(Point(1, 1), Point(1, 0)), Segment2D(Point(0, 0), Point(1, 0))] print(integration_reduction(facets, 0, (0, 1), 0, 1, [x, y], 0)) print(integration_reduction(facets, 1, (0, 1), 0, 1, [x, y], 0)) print(integration_reduction(facets, 2, (0, 1), 0, 1, [x, y], 0)) ``` ### hyperplane_parameters(poly) : poly : 2-Polytope Returns the list of hyperplane parameters for facets of the polygon. Limitation : 2D specific. #### Examples: ``` triangle = Polygon(Point(0,0), Point(1,1), Point(1,0)) hyperplane_parameters(triangle) ``` ### best_origin(a, b, lineseg, expr) : a, b : Line parameters of the line-segment expr : Uni/Bi-variate polynomial Returns a point on the lineseg whose vector inner product with the divergence of expr yields an expression with the least maximum total power. This is for reducing the number of computations in the integration reduction call. Limitation : 2D specific. #### Examples: ``` print("Best origin for x**3*y on x + y = 3 : ", best_origin((1,1), 3, Segment2D(Point(0, 3), Point(3, 0)), x**3*y)) print("Best origin for x*y**3 on x + y = 3 : ",best_origin((1,1), 3, Segment2D(Point(0, 3), Point(3, 0)), x*y**3)) ``` ### decompose(expr, separate=False) : expr : Uni/Bi-variate polynomial. separate(default : False) : If separate is True then return list of constituting monomials. Returns a dictionary of the terms having same total power. This is done to get homogeneous polynomials of different degrees from the expression. #### Examples: ``` print(decompose(1 + x + x**2 + x*y)) print(decompose(x**2 + x + y + 1 + x**3 + x**2*y + y**4 + x**3*y + y**2*x**2)) print(decompose(x**2 + x + y + 1 + x**3 + x**2*y + y**4 + x**3*y + y**2*x**2, 1)) ``` ### norm(expr) : point : Tuple/SymPy Point object/Dictionary Returns Euclidean norm of the point object. #### Examples: ``` print(norm((1, 2))) print(norm(Point(1, 2))) print(norm({x: 3, y: 3, z: 1})) ``` ### intersection(lineseg_1, lineseg_2) : lineseg_1, lineseg_2 : The input line segments whose intersection is to be found. Returns intersection point of two lines of which lineseg_1, lineseg_2 are part of. This function is called for adjacent line segments so the intersection point is always present with line segment boundaries. #### Examples: ``` print(intersection(Segment2D(Point(0, 0), Point(2, 2)), Segment2D(Point(1, 0), Point(0, 1)))) print(intersection(Segment2D(Point(2, 0), Point(2, 2)), Segment2D(Point(0, 0), Point(4, 4)))) ``` ### is_vertex(ent) : ent : Geometrical entity to denote a vertex. Returns True if ent is a vertex. Currently tuples of length 2 or 3 and SymPy Point object are supported. #### Examples: ``` print(is_vertex(Point(2, 8))) print(is_vertex(Point(2, 8, 1))) print(is_vertex((1, 1))) print(is_vertex([2, 9])) print(is_vertex(Polygon(Point(0, 0), Point(1, 1), Point(1, 0)))) ``` ### plot_polytope(poly) : poly : 2-Polytope Plots the 2-Polytope. Currently just defers it to plotting module in SymPy which in turn uses matplotlib. #### Examples: ``` hexagon = Polygon(Point(0, 0), Point(-sqrt(3) / 2, 0.5), Point(-sqrt(3) / 2, 3 / 2), Point(0, 2), Point(sqrt(3) / 2, 3 / 2), Point(sqrt(3) / 2, 0.5)) plot_polytope(hexagon) twist = Polygon(Point(-1, 1), Point(0, 0), Point(1, 1), Point(1, -1), Point(0, 0), Point(-1, -1)) plot_polytope(twist) ``` ### plot_polynomial(expr) : expr : The uni/bi-variate polynomial to plot Plots the polynomial. 
Currently just defers it to plotting module in SymPy which in turn uses matplotlib. #### Examples: ``` expr = x**2 plot_polynomial(expr) expr = x*y plot_polynomial(expr) ```
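As a closing example, `polytope_integrate` can also be called directly on a 3-Polytope expressed in the same vertex/face list format used in the `main_integrate3d` section above.

```
from sympy.abc import x, y, z
from sympy.integrals.intpoly import polytope_integrate

# Unit cube in the same [vertices, face_1, face_2, ...] format as above
cube = [[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
         (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)],
        [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5],
        [1, 5, 4, 0], [3, 1, 0, 2], [0, 4, 6, 2]]

print(polytope_integrate(cube, 1))        # volume of the unit cube -> 1
print(polytope_integrate(cube, x*y*z))    # integral of x*y*z over the cube -> 1/8
```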
github_jupyter
from sympy import sqrt from sympy.abc import x, y, z from sympy.geometry import * from sympy.integrals.intpoly import * triangle = Polygon(Point(0,0), Point(1,1), Point(1,0)) plot_polytope(triangle) print("Area of Triangle with vertices : (0,0), (1,1), (1,0) : ", polytope_integrate(triangle, 1)) print("x*y integrated over Triangle with vertices : (0,0), (1,1), (1,0) : ", polytope_integrate(triangle, x*y),"\n") hexagon = Polygon(Point(0, 0), Point(-sqrt(3) / 2, 0.5), Point(-sqrt(3) / 2, 3 / 2), Point(0, 2), Point(sqrt(3) / 2, 3 / 2), Point(sqrt(3) / 2, 0.5)) plot_polytope(hexagon) print("Area of regular hexagon with unit side length : ", polytope_integrate(hexagon, 1)) print("x + y**2 integrated over regular hexagon with unit side length : ", polytope_integrate(hexagon, x + y**2)) polys = [1, x, y, x*y] print("1, x, y, x*y integrated over hexagon : ", polytope_integrate(hexagon, polys, max_degree=2)) cube = [[(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)], [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0], [3, 1, 0, 2], [0, 4, 6, 2]] vertices = cube[0] faces = cube[1:] hp_params = hyperplane_parameters(faces, vertices) main_integrate3d(1, faces, vertices, hp_params) cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0), (5, 0, 5), (5, 5, 0), (5, 5, 5)], [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0], [3, 1, 0, 2], [0, 4, 6, 2]] facet = cube[1] facets = cube[1:] vertices = cube[0] print("Area of polygon < [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] > : ", polygon_integrate(facet, 0, facets, vertices, 1, 0)) point = (0, 0, 0) distance_to_side(point, [(0, 0, 1), (0, 1, 0)]) polygon = [(0, 5, 0), (5, 5, 0), (5, 5, 5), (0, 5, 5)] line_seg = [(0, 5, 0), (5, 5, 0)] print(lineseg_integrate(polygon, 0, line_seg, 1, 0)) triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) facets = triangle.sides hp_params = hyperplane_parameters(triangle) print(main_integrate(x**2 + y**2, facets, hp_params)) facets = [Segment2D(Point(0, 0), Point(1, 1)), Segment2D(Point(1, 1), Point(1, 0)), Segment2D(Point(0, 0), Point(1, 0))] print(integration_reduction(facets, 0, (0, 1), 0, 1, [x, y], 0)) print(integration_reduction(facets, 1, (0, 1), 0, 1, [x, y], 0)) print(integration_reduction(facets, 2, (0, 1), 0, 1, [x, y], 0)) triangle = Polygon(Point(0,0), Point(1,1), Point(1,0)) hyperplane_parameters(triangle) print("Best origin for x**3*y on x + y = 3 : ", best_origin((1,1), 3, Segment2D(Point(0, 3), Point(3, 0)), x**3*y)) print("Best origin for x*y**3 on x + y = 3 : ",best_origin((1,1), 3, Segment2D(Point(0, 3), Point(3, 0)), x*y**3)) print(decompose(1 + x + x**2 + x*y)) print(decompose(x**2 + x + y + 1 + x**3 + x**2*y + y**4 + x**3*y + y**2*x**2)) print(decompose(x**2 + x + y + 1 + x**3 + x**2*y + y**4 + x**3*y + y**2*x**2, 1)) print(norm((1, 2))) print(norm(Point(1, 2))) print(norm({x: 3, y: 3, z: 1})) print(intersection(Segment2D(Point(0, 0), Point(2, 2)), Segment2D(Point(1, 0), Point(0, 1)))) print(intersection(Segment2D(Point(2, 0), Point(2, 2)), Segment2D(Point(0, 0), Point(4, 4)))) print(is_vertex(Point(2, 8))) print(is_vertex(Point(2, 8, 1))) print(is_vertex((1, 1))) print(is_vertex([2, 9])) print(is_vertex(Polygon(Point(0, 0), Point(1, 1), Point(1, 0)))) hexagon = Polygon(Point(0, 0), Point(-sqrt(3) / 2, 0.5), Point(-sqrt(3) / 2, 3 / 2), Point(0, 2), Point(sqrt(3) / 2, 3 / 2), Point(sqrt(3) / 2, 0.5)) plot_polytope(hexagon) twist = Polygon(Point(-1, 1), Point(0, 0), Point(1, 1), Point(1, -1), Point(0, 0), Point(-1, -1)) plot_polytope(twist) expr 
= x**2 plot_polynomial(expr) expr = x*y plot_polynomial(expr)
0.429908
0.991381
**Comparing models performance to existing implementations** - This is not trying to beat the other implementations. It's just to show that with converted model we are getting similar results. - First is normal yolov3 with different sizes, then yolov3-tiny - Performance is close enough to original implementation - Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.534 - vs 55.3 in https://pjreddie.com/darknet/yolo/ - Differences come easily from better image processing etc - Training has happened with different processing than this notebooks evaluation - They probably resized so that object aspect ratio was kept same whereas we just resized (easier) - Also tuning confidence and nms thershold affect the value quite much - This notebook requires - cocapi: https://github.com/cocodataset/cocoapi - the right datasets: http://cocodataset.org/#download ## Setup ``` %matplotlib inline %reload_ext autoreload # use %autoreload command to reload all libraries from IPython.core.debugger import set_trace from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" #InteractiveShell.ast_node_interactivity = "last_expr" # Python path fixing so we can import libraries import sys import os sys_paths = ['../', # Adding yolov3_pytorch to python paths # '../../hands/fastai', # Fastai lib to help data handling etc: https://github.com/fastai/fastai '../../../data/coco/cocoapi/PythonAPI', ] for p in sys_paths: p = os.path.abspath(p) if p not in sys.path: sys.path.append(p) import time import torch import torch.optim as optim import torch.nn as nn from torchvision import datasets, transforms from matplotlib import patches, patheffects import json import pandas as pd print("Pytorch: {}".format(torch.__version__)) from yolov3_pytorch.utils import * from yolov3_pytorch.yolov3 import * from yolov3_pytorch.yolov3_tiny import * from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval ``` # Codo Data Load and Examples ``` # anno_json = '../../../data/coco/annotations/image_info_test-dev2017.json' anno_json = '../../../data/coco/annotations/instances_val2017.json' img_path = '../../../data/coco/val2017' with open(anno_json) as f: data = json.load(f) data.keys() data['info'] ``` ``` class_conversion = [(i, dic['id']) for i, dic in enumerate(data['categories'])] class_conversion = dict(class_conversion) class_conversion[0] class_names = dict([(dic['id'], dic['name']) for i, dic in enumerate(data['categories'])]) class_names[1] len(data['images']) len(data['categories']) data['images'][0] data['categories'][0] data['categories'][16] pd_annotations = pd.DataFrame.from_dict(data['annotations']) img_id = 37777 pd_annotations[pd_annotations['image_id']==img_id][:2] ``` ``` fnames = [d['file_name'] for d in data['images']] fids = np.array([[d['id'], d['width'], d['height']] for d in data['images']]) fnames[:3] fids[:3] ``` # Load Model ``` model = Yolov3(num_classes=80) model.load_state_dict(torch.load('../data/models/yolov3_coco_01.h5')) _ = model.eval().cuda() def img_fname(idx): return f"{img_path}/{idx:012d}.jpg" ``` **Checking that we get sensible predictions from data** ``` img_id = 37777 img_org = Image.open(img_fname(img_id)).convert('RGB') img_tensor = image2torch(img_org.resize((416, 416))).cuda() # For checking the results directly _ = nms(model.predict_img(img_tensor)[0], .2) plot_img_detections(img_tensor[0], _, figsize=(6, 6), class_names=list(class_names.values())) ``` # Predicting ``` def predict_all(data_imgs, sz=416, 
conf_thresh=.2, nms_thresh=.4): results = [] img_ids = [] for dat in tqdm(data_imgs): fname = dat['file_name'] f_id = dat['id'] img_ids.append(f_id) img = Image.open(img_fname(f_id)).convert('RGB') if sz: #img = img.resize((np.array(img), (sz, sz), interpolation=cv2.INTER_AREA) img = img.resize((sz, sz)) img_torch = image2torch(img).cuda() all_boxes = model.predict_img(img_torch, conf_thresh=conf_thresh)[0] boxes = nms(all_boxes, nms_thresh=nms_thresh) width = dat['width'] height = dat['height'] for pred in boxes: box = np.array(pred[:4]) box[:2] -= box[2:4]/2 # box[2:4] = box[2:4]/2 + box[:2] x,w = box[0]*dat['width'], box[2]*dat['width'] y,h = box[1]*dat['height'], box[3]*dat['height'] cat = class_conversion[int(pred[-1])] res = {"image_id":f_id, "category_id":cat, "bbox":[x, y, w, h], "score": pred[-2]} results.append(res) print(f"Results total {len(results)}. N of files {len(img_ids)}") return results, img_ids # results, img_ids = predict_all(data['images'][0:max_len], conf_thresh=.25, nms_thresh=.2) results, img_ids = predict_all(data['images'], conf_thresh=.2, nms_thresh=.4) len(img_ids) len(results) results[0] ``` ``` #img_id = results[10]['image_id'] img_id = 37777 img_org = Image.open(img_fname(img_id)).convert('RGB') img_boxes = [] img_classes = [] for r in results[0:1000]: if r['image_id'] == img_id: img_boxes.append(r['bbox']) img_classes.append(r['category_id']) len(img_classes) img_classes[0:10] img_boxes[0] plot_img_boxes(img_org, img_boxes, img_classes, figsize=(8,8), real_pixels=True, box_centered=False, class_names=class_names) ``` # Cocotools Evaluation ``` cocoGt=COCO(anno_json) def coco_map_eval(results, img_ids, tmp_fname = '/tmp/coco_result_tmp_01.json'): print(f"With sample size {len(img_ids)}") with open(tmp_fname, 'wt') as outfile: json.dump(results, outfile) cocoDt=cocoGt.loadRes(tmp_fname) cocoEval = COCOeval(cocoGt,cocoDt,'bbox') cocoEval.params.imgIds = img_ids cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() len(results) results[:3] coco_map_eval(results, img_ids) ``` # Yolov3 608 ``` model = Yolov3(num_classes=80) model.load_state_dict(torch.load('../data/models/yolov3_coco_01.h5')) _ = model.eval().cuda() results, img_ids = predict_all(data['images'], conf_thresh=.2, nms_thresh=.4, sz=608) coco_map_eval(results, img_ids) ``` # Yolov3 Tiny ``` model = Yolov3Tiny(num_classes=80, use_wrong_previous_anchors=True) model.load_state_dict(torch.load('../data/models/yolov3_tiny_coco_01.h5')) _ = model.eval().cuda() # results, img_ids = predict_all(data['images'][0:max_len], conf_thresh=.25, nms_thresh=.2) results, img_ids = predict_all(data['images'], conf_thresh=.1, nms_thresh=.3) coco_map_eval(results, img_ids) ```
github_jupyter
%matplotlib inline %reload_ext autoreload # use %autoreload command to reload all libraries from IPython.core.debugger import set_trace from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" #InteractiveShell.ast_node_interactivity = "last_expr" # Python path fixing so we can import libraries import sys import os sys_paths = ['../', # Adding yolov3_pytorch to python paths # '../../hands/fastai', # Fastai lib to help data handling etc: https://github.com/fastai/fastai '../../../data/coco/cocoapi/PythonAPI', ] for p in sys_paths: p = os.path.abspath(p) if p not in sys.path: sys.path.append(p) import time import torch import torch.optim as optim import torch.nn as nn from torchvision import datasets, transforms from matplotlib import patches, patheffects import json import pandas as pd print("Pytorch: {}".format(torch.__version__)) from yolov3_pytorch.utils import * from yolov3_pytorch.yolov3 import * from yolov3_pytorch.yolov3_tiny import * from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval # anno_json = '../../../data/coco/annotations/image_info_test-dev2017.json' anno_json = '../../../data/coco/annotations/instances_val2017.json' img_path = '../../../data/coco/val2017' with open(anno_json) as f: data = json.load(f) data.keys() data['info'] class_conversion = [(i, dic['id']) for i, dic in enumerate(data['categories'])] class_conversion = dict(class_conversion) class_conversion[0] class_names = dict([(dic['id'], dic['name']) for i, dic in enumerate(data['categories'])]) class_names[1] len(data['images']) len(data['categories']) data['images'][0] data['categories'][0] data['categories'][16] pd_annotations = pd.DataFrame.from_dict(data['annotations']) img_id = 37777 pd_annotations[pd_annotations['image_id']==img_id][:2] fnames = [d['file_name'] for d in data['images']] fids = np.array([[d['id'], d['width'], d['height']] for d in data['images']]) fnames[:3] fids[:3] model = Yolov3(num_classes=80) model.load_state_dict(torch.load('../data/models/yolov3_coco_01.h5')) _ = model.eval().cuda() def img_fname(idx): return f"{img_path}/{idx:012d}.jpg" img_id = 37777 img_org = Image.open(img_fname(img_id)).convert('RGB') img_tensor = image2torch(img_org.resize((416, 416))).cuda() # For checking the results directly _ = nms(model.predict_img(img_tensor)[0], .2) plot_img_detections(img_tensor[0], _, figsize=(6, 6), class_names=list(class_names.values())) def predict_all(data_imgs, sz=416, conf_thresh=.2, nms_thresh=.4): results = [] img_ids = [] for dat in tqdm(data_imgs): fname = dat['file_name'] f_id = dat['id'] img_ids.append(f_id) img = Image.open(img_fname(f_id)).convert('RGB') if sz: #img = img.resize((np.array(img), (sz, sz), interpolation=cv2.INTER_AREA) img = img.resize((sz, sz)) img_torch = image2torch(img).cuda() all_boxes = model.predict_img(img_torch, conf_thresh=conf_thresh)[0] boxes = nms(all_boxes, nms_thresh=nms_thresh) width = dat['width'] height = dat['height'] for pred in boxes: box = np.array(pred[:4]) box[:2] -= box[2:4]/2 # box[2:4] = box[2:4]/2 + box[:2] x,w = box[0]*dat['width'], box[2]*dat['width'] y,h = box[1]*dat['height'], box[3]*dat['height'] cat = class_conversion[int(pred[-1])] res = {"image_id":f_id, "category_id":cat, "bbox":[x, y, w, h], "score": pred[-2]} results.append(res) print(f"Results total {len(results)}. 
N of files {len(img_ids)}") return results, img_ids # results, img_ids = predict_all(data['images'][0:max_len], conf_thresh=.25, nms_thresh=.2) results, img_ids = predict_all(data['images'], conf_thresh=.2, nms_thresh=.4) len(img_ids) len(results) results[0] #img_id = results[10]['image_id'] img_id = 37777 img_org = Image.open(img_fname(img_id)).convert('RGB') img_boxes = [] img_classes = [] for r in results[0:1000]: if r['image_id'] == img_id: img_boxes.append(r['bbox']) img_classes.append(r['category_id']) len(img_classes) img_classes[0:10] img_boxes[0] plot_img_boxes(img_org, img_boxes, img_classes, figsize=(8,8), real_pixels=True, box_centered=False, class_names=class_names) cocoGt=COCO(anno_json) def coco_map_eval(results, img_ids, tmp_fname = '/tmp/coco_result_tmp_01.json'): print(f"With sample size {len(img_ids)}") with open(tmp_fname, 'wt') as outfile: json.dump(results, outfile) cocoDt=cocoGt.loadRes(tmp_fname) cocoEval = COCOeval(cocoGt,cocoDt,'bbox') cocoEval.params.imgIds = img_ids cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() len(results) results[:3] coco_map_eval(results, img_ids) model = Yolov3(num_classes=80) model.load_state_dict(torch.load('../data/models/yolov3_coco_01.h5')) _ = model.eval().cuda() results, img_ids = predict_all(data['images'], conf_thresh=.2, nms_thresh=.4, sz=608) coco_map_eval(results, img_ids) model = Yolov3Tiny(num_classes=80, use_wrong_previous_anchors=True) model.load_state_dict(torch.load('../data/models/yolov3_tiny_coco_01.h5')) _ = model.eval().cuda() # results, img_ids = predict_all(data['images'][0:max_len], conf_thresh=.25, nms_thresh=.2) results, img_ids = predict_all(data['images'], conf_thresh=.1, nms_thresh=.3) coco_map_eval(results, img_ids)
0.339609
0.790288
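A minimal sketch of the box-format conversion done inside `predict_all` in the YOLOv3 evaluation notebook above: the prediction/NMS output is assumed to be normalized `(cx, cy, w, h)` boxes, while COCO scoring expects `[x_min, y_min, width, height]` in pixels. The helper name and the example image size are illustrative, not part of the original code.

```python
import numpy as np

def yolo_to_coco_bbox(pred, img_w, img_h):
    """Convert one normalized (cx, cy, w, h) box to COCO [x, y, w, h] in pixels.

    Mirrors the `box[:2] -= box[2:4]/2` plus width/height scaling step above.
    """
    cx, cy, w, h = np.asarray(pred[:4], dtype=float)
    x_min = (cx - w / 2.0) * img_w   # move from box centre to top-left corner
    y_min = (cy - h / 2.0) * img_h
    return [x_min, y_min, w * img_w, h * img_h]

# A box centred in a 640x480 image, covering half of each dimension:
print(yolo_to_coco_bbox([0.5, 0.5, 0.5, 0.5], 640, 480))  # [160.0, 120.0, 320.0, 240.0]
```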
``` import pandas as pd import numpy as np import spacy import sys import warnings from imp import reload from gensim.corpora import Dictionary from importlib import reload from tensorflow.keras.preprocessing import sequence from tensorflow.keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, GRU, Flatten from tensorflow.keras.layers import Bidirectional, GlobalMaxPool1D from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Convolution1D from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers from sklearn.metrics import f1_score from tensorflow.keras.models import load_model warnings.filterwarnings('ignore') if sys.version[0] == '2': reload(sys) sys.setdefaultencoding("utf-8") nlp = spacy.load('en') df1 = pd.read_csv('datasets/labeledTrainData.tsv', delimiter='\t') df1 = df1.drop(['id'], axis=1) df1.head() df2 = pd.read_csv('datasets/imdb_master.csv', encoding='latin-1') df2.head() df2 = df2.drop(['Unnamed: 0','type','file'],axis=1) df2.columns = ["review","sentiment"] df2.head() df2 = df2[df2.sentiment != 'unsup'] df2['sentiment'] = df2['sentiment'].map({'pos': 1, 'neg': 0}) df2.head() df = pd.concat([df1, df2]).reset_index(drop=True) df.head() MAX_SEQUENCE_LEN = 130 UNK = 'UNK' PAD = 'PAD' def text_to_id_list(text, dictionary): return [dictionary.token2id.get(tok, dictionary.token2id.get(UNK)) for tok in text_to_tokens(text)] def texts_to_input(texts, dictionary): return sequence.pad_sequences( list(map(lambda x: text_to_id_list(x, dictionary), texts)), maxlen=MAX_SEQUENCE_LEN, padding='post', truncating='post', value=dictionary.token2id.get(PAD)) def text_to_tokens(text): return [tok.text.lower() for tok in nlp.tokenizer(text) if not (tok.is_punct or tok.is_quote)] def build_dictionary(texts): d = Dictionary(text_to_tokens(t)for t in texts) d.filter_extremes(no_below=3, no_above=1) d.add_documents([[UNK, PAD]]) d.compactify() return d dictionary = build_dictionary(df.review) dictionary.save('dictionary-sentiment') loaded_dict = Dictionary.load('dictionary-sentiment') len(loaded_dict) x_train = texts_to_input(df.review, dictionary) x_train y_train = df['sentiment'] y_train y_train = np.asarray(y_train) y_train max_features = 6000 maxlen = 130 embed_size = 128 model = Sequential() model.add(Embedding(len(dictionary), embed_size)) model.add(Bidirectional(LSTM(32, return_sequences = True))) model.add(GlobalMaxPool1D()) model.add(Dense(20, activation="relu")) model.add(Dropout(0.05)) model.add(Dense(1, activation="sigmoid")) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) batch_size = 100 epochs = 3 model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2) df_test = pd.read_csv('datasets/testData.tsv', header=0, delimiter='\t', quoting=3) df_test.head() df_test["sentiment"] = df_test["id"].map(lambda x: 1 if int(x.strip('"').split("_")[1]) >= 5 else 0) df_test.head() y_test = df_test["sentiment"] x_test = texts_to_input(df_test.review, dictionary) prediction = model.predict(x_test) y_pred = (prediction > 0.5) from sklearn.metrics import f1_score print('F1-score: {0}'.format(f1_score(y_pred, y_test))) model.save('sentiment_model-v2.h5') ```
github_jupyter
import pandas as pd import numpy as np import spacy import sys import warnings from imp import reload from gensim.corpora import Dictionary from importlib import reload from tensorflow.keras.preprocessing import sequence from tensorflow.keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, GRU, Flatten from tensorflow.keras.layers import Bidirectional, GlobalMaxPool1D from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Convolution1D from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers from sklearn.metrics import f1_score from tensorflow.keras.models import load_model warnings.filterwarnings('ignore') if sys.version[0] == '2': reload(sys) sys.setdefaultencoding("utf-8") nlp = spacy.load('en') df1 = pd.read_csv('datasets/labeledTrainData.tsv', delimiter='\t') df1 = df1.drop(['id'], axis=1) df1.head() df2 = pd.read_csv('datasets/imdb_master.csv', encoding='latin-1') df2.head() df2 = df2.drop(['Unnamed: 0','type','file'],axis=1) df2.columns = ["review","sentiment"] df2.head() df2 = df2[df2.sentiment != 'unsup'] df2['sentiment'] = df2['sentiment'].map({'pos': 1, 'neg': 0}) df2.head() df = pd.concat([df1, df2]).reset_index(drop=True) df.head() MAX_SEQUENCE_LEN = 130 UNK = 'UNK' PAD = 'PAD' def text_to_id_list(text, dictionary): return [dictionary.token2id.get(tok, dictionary.token2id.get(UNK)) for tok in text_to_tokens(text)] def texts_to_input(texts, dictionary): return sequence.pad_sequences( list(map(lambda x: text_to_id_list(x, dictionary), texts)), maxlen=MAX_SEQUENCE_LEN, padding='post', truncating='post', value=dictionary.token2id.get(PAD)) def text_to_tokens(text): return [tok.text.lower() for tok in nlp.tokenizer(text) if not (tok.is_punct or tok.is_quote)] def build_dictionary(texts): d = Dictionary(text_to_tokens(t)for t in texts) d.filter_extremes(no_below=3, no_above=1) d.add_documents([[UNK, PAD]]) d.compactify() return d dictionary = build_dictionary(df.review) dictionary.save('dictionary-sentiment') loaded_dict = Dictionary.load('dictionary-sentiment') len(loaded_dict) x_train = texts_to_input(df.review, dictionary) x_train y_train = df['sentiment'] y_train y_train = np.asarray(y_train) y_train max_features = 6000 maxlen = 130 embed_size = 128 model = Sequential() model.add(Embedding(len(dictionary), embed_size)) model.add(Bidirectional(LSTM(32, return_sequences = True))) model.add(GlobalMaxPool1D()) model.add(Dense(20, activation="relu")) model.add(Dropout(0.05)) model.add(Dense(1, activation="sigmoid")) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) batch_size = 100 epochs = 3 model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.2) df_test = pd.read_csv('datasets/testData.tsv', header=0, delimiter='\t', quoting=3) df_test.head() df_test["sentiment"] = df_test["id"].map(lambda x: 1 if int(x.strip('"').split("_")[1]) >= 5 else 0) df_test.head() y_test = df_test["sentiment"] x_test = texts_to_input(df_test.review, dictionary) prediction = model.predict(x_test) y_pred = (prediction > 0.5) from sklearn.metrics import f1_score print('F1-score: {0}'.format(f1_score(y_pred, y_test))) model.save('sentiment_model-v2.h5')
0.517327
0.272908
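A dependency-free sketch of the text-to-ids step that `text_to_id_list` / `texts_to_input` perform in the sentiment notebook above, assuming a plain whitespace tokenizer in place of the spaCy tokenizer; the tiny vocabulary and `MAX_LEN` value are illustrative only.

```python
UNK, PAD = "UNK", "PAD"
MAX_LEN = 6  # stands in for MAX_SEQUENCE_LEN above

def build_vocab(texts):
    # token -> id mapping with explicit PAD/UNK entries, like the Gensim Dictionary above
    vocab = {PAD: 0, UNK: 1}
    for text in texts:
        for tok in text.lower().split():
            vocab.setdefault(tok, len(vocab))
    return vocab

def encode(text, vocab, max_len=MAX_LEN):
    ids = [vocab.get(tok, vocab[UNK]) for tok in text.lower().split()]
    ids = ids[:max_len]                               # truncating='post'
    return ids + [vocab[PAD]] * (max_len - len(ids))  # padding='post'

vocab = build_vocab(["the film was great", "the film was dull"])
print(encode("the film was surprisingly great", vocab))  # the unseen word maps to UNK
```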
Adapted from https://www.tutorialspoint.com/webgl/webgl_sample_application.htm ``` import feedWebGL2.feedback as fd from ipywidgets import interact, interactive, fixed, interact_manual import numpy as np fd.widen_notebook() np.set_printoptions(precision=4) corners = 0.5 * np.array([ [1, 1, 1], [1, -1, -1], [-1, -1, 1], [-1, 1, -1], ]) colors = corners + 0.5 def tetrahedron_triangles(corners): triangles = np.zeros([4, 3, 3], dtype=np.float) for i in range(4): triangles[i, :i] = corners[:i] triangles[i, i:] = corners[i+1:] return triangles triangles = tetrahedron_triangles(corners) tcolors = tetrahedron_triangles(colors) # make faces "flat colored" if 0: for i in range(4): for j in range(3): tcolors[i,j] = colors[i] def matrix1(phi, i=0): result = np.eye(4) result[0,0] = np.cos(phi) result[i,i] = np.cos(phi) result[0,i] = np.sin(phi) result[i,0] = -np.sin(phi) return result def matrix(phi=0.0, theta=0.0, xt=0, yt=0, zt=0): M1 = matrix1(phi, 1) #print(M1) M2 = matrix1(theta, 2) #print(M2) M12 = M1.dot(M2) Mt = np.eye(4) Mt[3,0] = xt Mt[3,1] = yt Mt[3,2] = zt return M12.dot(Mt) M = matrix(1.0, 2.0, 0.1, -0.1, 0.2) M vertices = triangles.ravel() def rotate(phi=-0.5, theta=0.0, xt=0.0, yt=0.0, zt=0.0): M = matrix(phi * np.pi, theta * np.pi, xt, yt, zt) assert np.abs(np.linalg.det(M) - 1.0) < 0.0001 feedback_program.change_uniform_vector("rotation_matrix", M.ravel()) feedback_program.run() return M vertex_shader = """#version 300 es uniform mat4 rotation_matrix; in vec3 coordinates; in vec3 vcolor; out vec3 output_vertex; out vec3 coord_color; void main() { coord_color = vcolor; gl_Position = vec4(coordinates, 1.0); gl_Position = rotation_matrix * gl_Position; gl_Position[3] = 1.0; output_vertex = gl_Position.xyz; } """ fragment_shader = """#version 300 es // For some reason it is required to specify precision, otherwise error. precision highp float; in vec3 coord_color; //out vec4 color; out vec4 fragmentColor; void main() { fragmentColor = vec4(coord_color, 1.0); } """ feedback_program = fd.FeedbackProgram( program = fd.Program( vertex_shader = vertex_shader, fragment_shader = fragment_shader, feedbacks = fd.Feedbacks( output_vertex = fd.Feedback(num_components=3), ), ), runner = fd.Runner( vertices_per_instance = 3 * len(triangles), run_type = "TRIANGLES", uniforms = fd.Uniforms( rotation_matrix = fd.Uniform( default_value = list(M.ravel()), vtype = "4fv", is_matrix = True, ), ), inputs = fd.Inputs( coordinates = fd.Input( num_components = 3, from_buffer = fd.BufferLocation( name = "coordinates_buffer", # start at the beginning, don't skip any values... ), ), vcolor = fd.Input( num_components = 3, from_buffer = fd.BufferLocation( name = "colors_buffer", # start at the beginning, don't skip any values... ), ), ), ), context = fd.Context( buffers = fd.Buffers( coordinates_buffer = fd.Buffer( array=list(vertices), ), colors_buffer = fd.Buffer( array=list(tcolors.ravel()), ) ), width = 600, show = True, ), ) # display the widget and debugging information feedback_program.debugging_display() #feedback_program #feedback_program.run() interact(rotate, phi=(-1.0, 1.0), theta=(-1.0, 1.0), xt=(-1.0, 1.0), yt=(-1.0, 1.0), zt=(-1.0, 1.0)) #move_corner(x=-0.1) A1 = np.array(feedback_program.get_feedback("output_vertex")) A1 A2 = np.array(feedback_program.get_feedback("output_vertex")) A2 A1 - A2 colors np.abs(np.linalg.det(M)) ```
github_jupyter
import feedWebGL2.feedback as fd from ipywidgets import interact, interactive, fixed, interact_manual import numpy as np fd.widen_notebook() np.set_printoptions(precision=4) corners = 0.5 * np.array([ [1, 1, 1], [1, -1, -1], [-1, -1, 1], [-1, 1, -1], ]) colors = corners + 0.5 def tetrahedron_triangles(corners): triangles = np.zeros([4, 3, 3], dtype=np.float) for i in range(4): triangles[i, :i] = corners[:i] triangles[i, i:] = corners[i+1:] return triangles triangles = tetrahedron_triangles(corners) tcolors = tetrahedron_triangles(colors) # make faces "flat colored" if 0: for i in range(4): for j in range(3): tcolors[i,j] = colors[i] def matrix1(phi, i=0): result = np.eye(4) result[0,0] = np.cos(phi) result[i,i] = np.cos(phi) result[0,i] = np.sin(phi) result[i,0] = -np.sin(phi) return result def matrix(phi=0.0, theta=0.0, xt=0, yt=0, zt=0): M1 = matrix1(phi, 1) #print(M1) M2 = matrix1(theta, 2) #print(M2) M12 = M1.dot(M2) Mt = np.eye(4) Mt[3,0] = xt Mt[3,1] = yt Mt[3,2] = zt return M12.dot(Mt) M = matrix(1.0, 2.0, 0.1, -0.1, 0.2) M vertices = triangles.ravel() def rotate(phi=-0.5, theta=0.0, xt=0.0, yt=0.0, zt=0.0): M = matrix(phi * np.pi, theta * np.pi, xt, yt, zt) assert np.abs(np.linalg.det(M) - 1.0) < 0.0001 feedback_program.change_uniform_vector("rotation_matrix", M.ravel()) feedback_program.run() return M vertex_shader = """#version 300 es uniform mat4 rotation_matrix; in vec3 coordinates; in vec3 vcolor; out vec3 output_vertex; out vec3 coord_color; void main() { coord_color = vcolor; gl_Position = vec4(coordinates, 1.0); gl_Position = rotation_matrix * gl_Position; gl_Position[3] = 1.0; output_vertex = gl_Position.xyz; } """ fragment_shader = """#version 300 es // For some reason it is required to specify precision, otherwise error. precision highp float; in vec3 coord_color; //out vec4 color; out vec4 fragmentColor; void main() { fragmentColor = vec4(coord_color, 1.0); } """ feedback_program = fd.FeedbackProgram( program = fd.Program( vertex_shader = vertex_shader, fragment_shader = fragment_shader, feedbacks = fd.Feedbacks( output_vertex = fd.Feedback(num_components=3), ), ), runner = fd.Runner( vertices_per_instance = 3 * len(triangles), run_type = "TRIANGLES", uniforms = fd.Uniforms( rotation_matrix = fd.Uniform( default_value = list(M.ravel()), vtype = "4fv", is_matrix = True, ), ), inputs = fd.Inputs( coordinates = fd.Input( num_components = 3, from_buffer = fd.BufferLocation( name = "coordinates_buffer", # start at the beginning, don't skip any values... ), ), vcolor = fd.Input( num_components = 3, from_buffer = fd.BufferLocation( name = "colors_buffer", # start at the beginning, don't skip any values... ), ), ), ), context = fd.Context( buffers = fd.Buffers( coordinates_buffer = fd.Buffer( array=list(vertices), ), colors_buffer = fd.Buffer( array=list(tcolors.ravel()), ) ), width = 600, show = True, ), ) # display the widget and debugging information feedback_program.debugging_display() #feedback_program #feedback_program.run() interact(rotate, phi=(-1.0, 1.0), theta=(-1.0, 1.0), xt=(-1.0, 1.0), yt=(-1.0, 1.0), zt=(-1.0, 1.0)) #move_corner(x=-0.1) A1 = np.array(feedback_program.get_feedback("output_vertex")) A1 A2 = np.array(feedback_program.get_feedback("output_vertex")) A2 A1 - A2 colors np.abs(np.linalg.det(M))
0.315103
0.800497
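A small numpy-only check of the rotation construction used in the WebGL notebook above: `matrix1(phi, i)` builds a 4x4 rotation in the (axis 0, axis i) plane, and the `rotate` callback asserts that the composed matrix has determinant close to 1. The sketch below reproduces that construction (translation omitted) and verifies the expected properties; it is an illustration, not part of the original widget code.

```python
import numpy as np

def plane_rotation(phi, i):
    """4x4 rotation in the (axis 0, axis i) plane; i is assumed to be 1 or 2."""
    m = np.eye(4)
    m[0, 0] = m[i, i] = np.cos(phi)
    m[0, i] = np.sin(phi)
    m[i, 0] = -np.sin(phi)
    return m

rot = plane_rotation(0.7, 1) @ plane_rotation(-1.3, 2)      # compose two plane rotations
assert np.isclose(np.linalg.det(rot), 1.0)                  # rotations preserve volume
assert np.allclose(rot[:3, :3] @ rot[:3, :3].T, np.eye(3))  # upper 3x3 block is orthogonal
print("det of composed rotation:", np.linalg.det(rot))
```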
# Ocean and Land Colour Instrument - OLCI

## Ocean Colour data, from the Sentinel-3 Ocean and Land Colour Instrument (OLCI), provides a window into the ocean's living ecosystems.

OLCI provides spectral information on the colour of the oceans. This data can be used to monitor global ocean primary production by phytoplankton, the basis of nearly all life in our seas.

Ocean colour data is also vital to understand climate change — ocean colour is one of the Essential Climate Variables listed by the World Meteorological Organization to detect biological activity in the ocean’s surface layer. Phytoplankton take up carbon dioxide (CO2) during photosynthesis, making them important carbon sinks. Ocean colour data can be used to monitor the annual global uptake of CO2 by phytoplankton on a global scale. Using this data we can study the wider Earth system, for instance the El Niño/La Niña phenomena and how these impact the ocean ecosystem.

Beyond climate, ocean colour data is also useful for looking at more sporadic events. OLCI data can be used to track sediment transport, monitor coastal water quality, and track and forecast harmful algal blooms that are a danger to humans, marine/freshwater life and aquaculture.

The global picture of ocean ecosystems provided by ocean colour data can guide sustainable marine resource management and support reporting obligations of the European Union's legislation within the Marine Strategy Framework Directive and Water Framework Directive, the goal of which is to achieve or maintain Good Environmental Status of the seas by the year 2020.

Further information on the sensor and its data can be found at [http://olci.eumetsat.int](http://olci.eumetsat.int)

### Data Download

There are several options when it comes to downloading Sentinel-3 OLCI data. They can generally be split into:

* Copernicus Online Data Access - CODA ([more info](https://www.eumetsat.int/website/home/Data/DataDelivery/CopernicusOnlineDataAccess/index.html))
* EUMETSAT Data Centre ([more info](https://www.eumetsat.int/website/home/Data/DataDelivery/EUMETSATDataCentre/index.html))
* Reprocessed data - CODAREP ([more info](https://codarep.eumetsat.int))

### Exercises

These notebooks will walk you through several exercises that will cover:

* [Basic spatial subsetting and interrogation](./12_OLCI_spatial_interrogation.ipynb)
* [Basic spectral interrogation](./13_OLCI_spectral_interrogation.ipynb)

There are also notebooks that cover more complex topics such as:

* assessing the water constituents using OLCI
* comparison of different chlorophyll algorithms
* comparison of level 1 and level 2 data

<br>
<a href="/notebooks/00_index.ipynb"><< Index</a><span style="float:right;"><a href="./12_OLCI_spatial_interrogation.ipynb">12 - Ocean and Land Colour Instrument - spatial interrogation >></a>
<hr>
<p style="text-align:left;">This project is licensed under the <a href="./LICENSE">MIT License</a> <span style="float:right;"><a href="https://gitlab.eumetsat.int/eo-lab-usc-open/ocean">View on GitLab</a> | <a href="https://training.eumetsat.int/">EUMETSAT Training</a> | <a href=mailto:training@eumetsat.int>Contact</a></span></p>
github_jupyter
# Ocean and Land Colour Instrument - OLCI ## Ocean Colour data, from the Sentinel-3 Ocean and Land Colour Instrument (OLCI), provides a window into the ocean living ecosystems. OLCI provides spectral information on the colour of the oceans. This data can be used to monitor global ocean primary production by phytoplankton, the basis of nearly all life in our seas. Ocean colour data is also vital to understand climate change — ocean colour is one of the Essential Climate Variables listed by the World Meteorological Organization to detect biological activity in the ocean’s surface layer. Phytoplankton take up carbon dioxide (CO2) during photosynthesis, making them important carbon sinks. Ocean colour data can be used to monitor the annual global uptake of CO2 by phytoplankton on a global scale. Using this data we can study the wider Earth system, for instance the El Niño/La Niña phenomena and how these impacts the ocean ecosystem. Beyond climate, ocean colour data is also useful to look at more sporadic events. OLCI data can be used track sediment transport, monitor coastal water quality and track and forecast harmful algal blooms that are a danger to humans, marine/freshwater life and aquaculture. The global picture of ocean ecosystems provided by ocean colour data can guide sustainable marine resource management and support reporting obligations of the European Union's legislation within Marine Strategy Framework Directive and Water Framework Directive, the goal of which is to achieve or maintain Good Environmental Status of the seas by the year 2020 Further information on the sensor and its data can be found at [http://olci.eumetsat.int](http://olci.eumetsat.int) ### Data Download There are several options when it comes to downloading Sentinel-3 OLCI data. The option can be generally split into ; * Copernicus Online Data Access - CODA ([more info](https://www.eumetsat.int/website/home/Data/DataDelivery/CopernicusOnlineDataAccess/index.html)) * EUMETSAT Data Centre ([more info](https://www.eumetsat.int/website/home/Data/DataDelivery/EUMETSATDataCentre/index.html)) * Reprocessed data - CODAREP ([more info](https://codarep.eumetsat.int)) ### exercises These notebooks will walk you through several exercises that will cover : * [Basic spatial subsetting and intergoation](./12_OLCI_spatial_interrogation.ipynb) * [Basic spectral interrogation](./13_OLCI_spectral_interrogation.ipynb) There are also notebooks that cover more complex topics such as : * assessing the water constituents using OLCI * comparison of different chlorophyll algorithms * comparison of level 1 and level 2 data <br> <a href="/notebooks/00_index.ipynb"><< Index</a><span style="float:right;"><a href="./12_OLCI_spatial_interrogation.ipynb">12 - Ocean and Land Colour Instrument - spatial interrogation >></a> <hr> <p style="text-align:left;">This project is licensed under the <a href="./LICENSE">MIT License</a> <span style="float:right;"><a href="https://gitlab.eumetsat.int/eo-lab-usc-open/ocean">View on GitLab</a> | <a href="https://training.eumetsat.int/">EUMETSAT Training</a> | <a href=mailto:training@eumetsat.int>Contact</a></span></p>
0.757525
0.982372
``` # Standard imports import pandas as pd import matplotlib.pyplot as plt import numpy as np import re import seaborn as sns import time import tensorflow as tf from tensorflow.keras.backend import get_value %matplotlib inline # Insert mavenn at beginning of path import sys path_to_mavenn_local = '../../' sys.path.insert(0, path_to_mavenn_local) #Load mavenn and check path import mavenn print(mavenn.__path__) # Import dataset splitter from sklearn from sklearn.model_selection import train_test_split # Load dataset as a dataframe data_df = mavenn.load_example_dataset('mpsa') # Extract x and y as np.arrays x = data_df['x'].values y = data_df['y'].values # Split into training and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0) # Define a model with a pairwise G-P map # a heteroskedastic Gaussian GE measurement process, # and specify the training data. mavenn.set_seed(0) model = mavenn.Model(x=x_train, y=y_train, gpmap_type='pairwise', alphabet='dna', regression_type='GE', ge_noise_model_type='SkewedT', ge_nonlinearity_monotonic=True, ge_heteroskedasticity_order=2) # Fit model to training data start_time = time.time() model.fit(epochs=10, learning_rate=0.005, early_stopping=True, early_stopping_patience=20) training_time = time.time()-start_time print(f'training time: {training_time:.1f} seconds') model.arg_dict.keys() # Save model model.save('mpsa') # Load model model = mavenn.load('mpsa') model.get_nn().summary() # Predict latent phentoype values (phi) on test data phi_test = model.x_to_phi(x_test) # Predict measurement values (yhat) on test data yhat_test = model.x_to_yhat(x_test) # Compute R^2 between yhat and y_test Rsq = np.corrcoef(yhat_test.ravel(), y_test)[0, 1]**2 # Set phi lims and create grid in phi space phi_lim = [min(phi_test)-.5, max(phi_test)+.5] phi_grid = np.linspace(phi_lim[0], phi_lim[1], 1000) # Compute yhat each phi gridpoint yhat_grid = model.phi_to_yhat(phi_grid) # Compute 68% CI for each yhat yqs_grid = model.yhat_to_yq(yhat_grid, q=[0.16, 0.84]) # Create figure and axes fig, axs = plt.subplots(1, 2, figsize=[8, 4]) # Left panel: illustrate measurement process (y vs. phi) ax = axs[0] ax.scatter(phi_test, y_test, color='C0', s=5, alpha=.2, label='test data') ax.plot(phi_grid, yhat_grid, linewidth=2, color='C1', label='$\hat{y} = g(\phi)$') ax.plot(phi_grid, yqs_grid[:, 0], linestyle='--', color='C1', label='68% CI') ax.plot(phi_grid, yqs_grid[:, 1], linestyle='--', color='C1') ax.set_xlim(phi_lim) ax.set_xlabel('latent phenotype ($\phi$)') ax.set_ylabel('measurement ($y$)') ax.set_title('measurement process') ax.legend() # Center panel: illustrate model performance (y vs. 
yhat) ax = axs[1] #ys = np.vstack([y_test]) ax.scatter(yhat_test, y_test, color='C0', s=5, alpha=.2, label='test data') #ax.set_autoscale_on(False) lims = ax.get_xlim() ax.plot(lims, lims, linestyle=':', color='k', label='$y=\hat{y}$') ax.set_xlabel('model prediction ($\hat{y}$)') ax.set_ylabel('measurement ($y$)') ax.set_title(f'performance ($R^2$={Rsq:.3})') ax.legend() # Tighten bounds on figure fig.tight_layout(w_pad=3) plt.show() # Compute mask_dict from trainig data mask_dict = mavenn.get_mask_dict(x_train, alphabet='dna') mask_dict wt_seq = mavenn.x_to_consensus(x_train) theta_add_df = model.get_gpmap_parameters(which='additive') theta_add_df.head() # Illustrate pairwise parameters fig, ax = plt.subplots(1,1, figsize=[10,4]) ax, cb = mavenn.heatmap(theta_add_df, ax=ax, seq=wt_seq, ccenter=0, #mask_dict=mask_dict, missing_values=0) theta_pair_df = model.get_gpmap_parameters(which='pairwise') theta_pair_df.head() all(theta_pair_df['l2'] - theta_pair_df['l1'] == 1) # Illustrate pairwise parameters fig, ax = plt.subplots(1,1, figsize=[10,4]) ax, cb = mavenn.heatmap_pairwise(theta_pair_df, ccenter=0, ax=ax, seq=wt_seq, mask_dict=mask_dict) ```
github_jupyter
# Standard imports import pandas as pd import matplotlib.pyplot as plt import numpy as np import re import seaborn as sns import time import tensorflow as tf from tensorflow.keras.backend import get_value %matplotlib inline # Insert mavenn at beginning of path import sys path_to_mavenn_local = '../../' sys.path.insert(0, path_to_mavenn_local) #Load mavenn and check path import mavenn print(mavenn.__path__) # Import dataset splitter from sklearn from sklearn.model_selection import train_test_split # Load dataset as a dataframe data_df = mavenn.load_example_dataset('mpsa') # Extract x and y as np.arrays x = data_df['x'].values y = data_df['y'].values # Split into training and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0) # Define a model with a pairwise G-P map # a heteroskedastic Gaussian GE measurement process, # and specify the training data. mavenn.set_seed(0) model = mavenn.Model(x=x_train, y=y_train, gpmap_type='pairwise', alphabet='dna', regression_type='GE', ge_noise_model_type='SkewedT', ge_nonlinearity_monotonic=True, ge_heteroskedasticity_order=2) # Fit model to training data start_time = time.time() model.fit(epochs=10, learning_rate=0.005, early_stopping=True, early_stopping_patience=20) training_time = time.time()-start_time print(f'training time: {training_time:.1f} seconds') model.arg_dict.keys() # Save model model.save('mpsa') # Load model model = mavenn.load('mpsa') model.get_nn().summary() # Predict latent phentoype values (phi) on test data phi_test = model.x_to_phi(x_test) # Predict measurement values (yhat) on test data yhat_test = model.x_to_yhat(x_test) # Compute R^2 between yhat and y_test Rsq = np.corrcoef(yhat_test.ravel(), y_test)[0, 1]**2 # Set phi lims and create grid in phi space phi_lim = [min(phi_test)-.5, max(phi_test)+.5] phi_grid = np.linspace(phi_lim[0], phi_lim[1], 1000) # Compute yhat each phi gridpoint yhat_grid = model.phi_to_yhat(phi_grid) # Compute 68% CI for each yhat yqs_grid = model.yhat_to_yq(yhat_grid, q=[0.16, 0.84]) # Create figure and axes fig, axs = plt.subplots(1, 2, figsize=[8, 4]) # Left panel: illustrate measurement process (y vs. phi) ax = axs[0] ax.scatter(phi_test, y_test, color='C0', s=5, alpha=.2, label='test data') ax.plot(phi_grid, yhat_grid, linewidth=2, color='C1', label='$\hat{y} = g(\phi)$') ax.plot(phi_grid, yqs_grid[:, 0], linestyle='--', color='C1', label='68% CI') ax.plot(phi_grid, yqs_grid[:, 1], linestyle='--', color='C1') ax.set_xlim(phi_lim) ax.set_xlabel('latent phenotype ($\phi$)') ax.set_ylabel('measurement ($y$)') ax.set_title('measurement process') ax.legend() # Center panel: illustrate model performance (y vs. 
yhat) ax = axs[1] #ys = np.vstack([y_test]) ax.scatter(yhat_test, y_test, color='C0', s=5, alpha=.2, label='test data') #ax.set_autoscale_on(False) lims = ax.get_xlim() ax.plot(lims, lims, linestyle=':', color='k', label='$y=\hat{y}$') ax.set_xlabel('model prediction ($\hat{y}$)') ax.set_ylabel('measurement ($y$)') ax.set_title(f'performance ($R^2$={Rsq:.3})') ax.legend() # Tighten bounds on figure fig.tight_layout(w_pad=3) plt.show() # Compute mask_dict from trainig data mask_dict = mavenn.get_mask_dict(x_train, alphabet='dna') mask_dict wt_seq = mavenn.x_to_consensus(x_train) theta_add_df = model.get_gpmap_parameters(which='additive') theta_add_df.head() # Illustrate pairwise parameters fig, ax = plt.subplots(1,1, figsize=[10,4]) ax, cb = mavenn.heatmap(theta_add_df, ax=ax, seq=wt_seq, ccenter=0, #mask_dict=mask_dict, missing_values=0) theta_pair_df = model.get_gpmap_parameters(which='pairwise') theta_pair_df.head() all(theta_pair_df['l2'] - theta_pair_df['l1'] == 1) # Illustrate pairwise parameters fig, ax = plt.subplots(1,1, figsize=[10,4]) ax, cb = mavenn.heatmap_pairwise(theta_pair_df, ccenter=0, ax=ax, seq=wt_seq, mask_dict=mask_dict)
0.723602
0.742702
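The MAVE-NN notebook above scores test-set performance as the squared Pearson correlation between model predictions and held-out measurements. A tiny self-contained illustration of that metric with synthetic values (the noise level and sample size below are arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
y_true = rng.normal(size=200)                 # pretend held-out measurements
y_hat = y_true + 0.3 * rng.normal(size=200)   # imperfect model predictions

# Same computation as Rsq = np.corrcoef(yhat_test.ravel(), y_test)[0, 1]**2 above
r_squared = np.corrcoef(y_hat, y_true)[0, 1] ** 2
print(f"R^2 = {r_squared:.3f}")
```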
# A detailed look at the depth-age relationship for the seafloor. We are going to work with the ETOPO1 dataset (Amante et al) which we can download from various services online when we need it. You can read more about the dataset here: https://ngdc.noaa.gov/mgg/global/ ![color_etopo1_ice_low_400.gif](Images/color_etopo1_ice_low_400.gif) We need some libraries to help with downloading, manipulating and plotting the data: - `numpy` to manipulate arrays - `xarray` which extends `numpy` for data that might be too big to read all at once - `matplotlib` and `cartopy` for plotting data on maps ## Navigation - [Maps 1.1](PHYS3070-LabMD.1.1.ipynb) - [Maps 1.2](PHYS3070-LabMD.1.2.ipynb) - [Maps 1.3](PHYS3070-LabMD.1.3.ipynb) - [Maps 2.1](PHYS3070-LabMD.2.1.ipynb) - [Maps 2.2](PHYS3070-LabMD.2.2.ipynb) - [Maps 2.3](PHYS3070-LabMD.2.3.ipynb) - [Maps 2.4](PHYS3070-LabMD.2.4.ipynb) - [Maps 2.5](PHYS3070-LabMD.2.5.ipynb) ``` import numpy as np import xarray import matplotlib.pyplot as plt from matplotlib import cm # colourmaps %matplotlib inline ``` ### References Amante, C. “ETOPO1 1 Arc-Minute Global Relief Model: Procedures, Data Sources and Analysis.” National Geophysical Data Center, NOAA, 2009. https://doi.org/10.7289/V5C8276M. ## Read ETOPO data from a remote service This is how we access the data - provide a url, open that url, and ask for a subset of the data (either by region or by taking every n'th value) ``` python etopo_dataset = "http://thredds.socib.es/thredds/dodsC/ancillary_data/bathymetry/ETOPO1_Bed_g_gmt4.nc" etopo_data = xarray.open_dataset(etopo_dataset) subs_data = etopo_data.sel(x=slice(left,right, 30), y=slice(bottom, top, 30)) ``` Here we have requested every 30th data point. ``` (left, bottom, right, top) = (-180, -90, 180, 90) map_extent = ( left, right, bottom, top) etopo_dataset = "http://thredds.socib.es/thredds/dodsC/ancillary_data/bathymetry/ETOPO1_Bed_g_gmt4.nc" etopo_data = xarray.open_dataset(etopo_dataset, engine="netcdf4") subs_data = etopo_data.sel(x=slice(left,right, 300), y=slice(bottom, top, 300)) lons = subs_data.coords.get('x') lats = subs_data.coords.get('y') vals = subs_data['z'] x,y = np.meshgrid(lons.data, lats.data) height = vals.data ``` ## Validation Can you check to see what resolution data we have downloaded ? (hint *the `height` data is a numpy array and has a `shape` attribute*) Check here: ``` print("The shape of the array is ... ") ``` and we should plot the data to see if it matches the image above and whether we need more resolution. Does that look right ? If the map is horribly pixelated, we might try downloading more data. Don't go mad or it will take forever. ``` import cartopy.crs as ccrs import cartopy.feature as cfeature coastline = cfeature.NaturalEarthFeature('physical', 'coastline', '10m', edgecolor=(1.0,0.8,0.0), facecolor="none") plt.figure(figsize=(15, 10)) ax = plt.subplot(111, projection=ccrs.PlateCarree()) ax.set_extent(map_extent) ax.add_feature(coastline, edgecolor="black", linewidth=0.5, zorder=3) plt.imshow(height, extent=map_extent, transform=ccrs.PlateCarree(), cmap='terrain', origin='upper', vmin=-5000., vmax=5000.) ```
github_jupyter
import numpy as np import xarray import matplotlib.pyplot as plt from matplotlib import cm # colourmaps %matplotlib inline Here we have requested every 30th data point. ## Validation Can you check to see what resolution data we have downloaded ? (hint *the `height` data is a numpy array and has a `shape` attribute*) Check here: and we should plot the data to see if it matches the image above and whether we need more resolution. Does that look right ? If the map is horribly pixelated, we might try downloading more data. Don't go mad or it will take forever.
0.331661
0.981293
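A quick back-of-the-envelope answer to the resolution question posed in the ETOPO notebook above. ETOPO1 is a 1 arc-minute grid, so keeping every 300th value should give roughly 5-degree spacing; the exact node counts below assume a grid-registered file spanning both poles and both ends of the longitude range, which is worth checking against the real dataset's dimensions.

```python
# ETOPO1 is sampled at 1 arc-minute; the grid-registered file is assumed to have
# 360*60 + 1 longitudes and 180*60 + 1 latitudes (check against etopo_data.dims).
full_nx, full_ny = 360 * 60 + 1, 180 * 60 + 1
stride = 300                              # the notebook keeps every 300th value

sub_nx = len(range(0, full_nx, stride))   # longitudes kept
sub_ny = len(range(0, full_ny, stride))   # latitudes kept
spacing_deg = stride / 60.0               # 300 arc-minutes = 5 degrees

print(f"expected height.shape ~ ({sub_ny}, {sub_nx}), grid spacing ~ {spacing_deg} degrees")
```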
# exploring itertools - itertools https://docs.python.org/3/library/itertools.html - more itertools https://more-itertools.readthedocs.io/en/stable/index.html ## itertools ``` import itertools as it inf=['count', 'cycle', 'repeat'] iter_short=['accumulate', 'chain', 'chain.from_iterable', 'compress', 'dropwhile', 'filterfalse', 'groupby', 'islice', 'pairwise', 'starmap', 'takewhile', 'tee', 'zip_longest'] combinatoric = ['product', 'permutations', 'combinations', 'combinations_with_replacement'] notes="pairwise is new in vesrion 3.10 but exists in more-itertools for earlier versions" recipes=['take', 'prepend', 'tabulate', 'tail', 'consume', 'nth', 'all_equal', 'quantify', 'pad_none', 'ncycles', 'flatten', 'repeatfunc', 'grouper', 'triplewise', 'sliding_window', 'roundrobin', 'partition', 'before_and_after', 'powerset', 'unique_everseen', 'unique_justseen', 'iter_except', 'first_true', 'random_product', 'random_combination_with_replacement', 'nth_combination'] for x in combinatoric: print(x) help(it.__dict__[x]) for x in inf: print(x) help(it.__dict__[x]) for x in iter_short: print(x) help(it.__dict__[x]) help(it.chain.from_iterable) for x in combinatoric: print(x) help(it.__dict__[x]) ``` ## more_itertools ``` import more_itertools as mi grouping="chunked, ichunked, sliced, distribute, divide, split_at, split_before, split_after, split_into, split_when, bucket, unzip, grouper, partition" look_ahead_behind="spy, peekable, seekable" windowing="windowed, substrings, substrings_indexes, stagger, windowed_complete, pairwise, triplewise, sliding_window" Augmenting="count_cycle, intersperse, padded, mark_ends, repeat_last, adjacent, groupby_transform, pad_none, ncycles" Combining="collapse, sort_together, interleave, interleave_longest, interleave_evenly, zip_offset, zip_equal, zip_broadcast, dotproduct, convolve, flatten, roundrobin, prepend, value_chain" Summarizing="ilen, unique_to_each, sample, consecutive_groups, run_length, map_reduce, exactly_n, is_sorted, all_equal, all_unique, minmax, first_true, quantify" Selecting="islice_extended, first, last, one, only, strictly_n, strip, lstrip, rstrip, filter_except, map_except, nth_or_last, unique_in_window, before_and_after, nth, take, tail, unique_everseen, unique_justseen, duplicates_everseen, duplicates_justseen" Combinatorics="distinct_permutations, distinct_combinations, circular_shifts, partitions, set_partitions, product_index, combination_index, permutation_index, powerset, random_product, random_permutation, random_combination, random_combination_with_replacement, nth_product, nth_permutation, nth_combination" Wrapping="always_iterable, always_reversible, countable, consumer, with_iter, iter_except" Others = "locate, rlocate, replace, numeric_range, side_effect, iterate, difference, make_decorator, SequenceView, time_limited, consume, tabulate, repeatfunc" names = 'grouping, look_ahead_behind, windowing, Augmenting, Combining, Summarizing, Selecting, Combinatorics, Wrapping, Others'.replace(',','').split() names recipes_in_more = [] recipes_in_more.extend([x for x in grouping.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in look_ahead_behind.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in windowing.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Augmenting.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Combining.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Summarizing.replace(",","").split() 
if x in recipes]) recipes_in_more.extend([x for x in Selecting.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Combinatorics.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Wrapping.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Others.replace(",","").split() if x in recipes]) recipes_in_more set(recipes)-set(recipes_in_more) for n in [x.strip() for x in grouping.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in look_ahead_behind.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in windowing.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Augmenting.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Combining.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Summarizing.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Selecting.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Combinatorics.split(',')]: print(n) help(mi.__dict__[n]) re.sub? import re worditer = re.sub(r'[\*,\.]','', "Slices with negative values require some caching of *iterable*, but this").lower().split() print(repr(worditer)) import random random.shuffle(worditer) print(repr(worditer)) from queue import i = [1,2,78, 'fred', ['nested', 'list', 1], 22.56] mi.random_combination_with_replacement(i, 5) for n in [x.strip() for x in Wrapping.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Others.split(',')]: print(n) help(mi.__dict__[n]) ```
github_jupyter
import itertools as it inf=['count', 'cycle', 'repeat'] iter_short=['accumulate', 'chain', 'chain.from_iterable', 'compress', 'dropwhile', 'filterfalse', 'groupby', 'islice', 'pairwise', 'starmap', 'takewhile', 'tee', 'zip_longest'] combinatoric = ['product', 'permutations', 'combinations', 'combinations_with_replacement'] notes="pairwise is new in vesrion 3.10 but exists in more-itertools for earlier versions" recipes=['take', 'prepend', 'tabulate', 'tail', 'consume', 'nth', 'all_equal', 'quantify', 'pad_none', 'ncycles', 'flatten', 'repeatfunc', 'grouper', 'triplewise', 'sliding_window', 'roundrobin', 'partition', 'before_and_after', 'powerset', 'unique_everseen', 'unique_justseen', 'iter_except', 'first_true', 'random_product', 'random_combination_with_replacement', 'nth_combination'] for x in combinatoric: print(x) help(it.__dict__[x]) for x in inf: print(x) help(it.__dict__[x]) for x in iter_short: print(x) help(it.__dict__[x]) help(it.chain.from_iterable) for x in combinatoric: print(x) help(it.__dict__[x]) import more_itertools as mi grouping="chunked, ichunked, sliced, distribute, divide, split_at, split_before, split_after, split_into, split_when, bucket, unzip, grouper, partition" look_ahead_behind="spy, peekable, seekable" windowing="windowed, substrings, substrings_indexes, stagger, windowed_complete, pairwise, triplewise, sliding_window" Augmenting="count_cycle, intersperse, padded, mark_ends, repeat_last, adjacent, groupby_transform, pad_none, ncycles" Combining="collapse, sort_together, interleave, interleave_longest, interleave_evenly, zip_offset, zip_equal, zip_broadcast, dotproduct, convolve, flatten, roundrobin, prepend, value_chain" Summarizing="ilen, unique_to_each, sample, consecutive_groups, run_length, map_reduce, exactly_n, is_sorted, all_equal, all_unique, minmax, first_true, quantify" Selecting="islice_extended, first, last, one, only, strictly_n, strip, lstrip, rstrip, filter_except, map_except, nth_or_last, unique_in_window, before_and_after, nth, take, tail, unique_everseen, unique_justseen, duplicates_everseen, duplicates_justseen" Combinatorics="distinct_permutations, distinct_combinations, circular_shifts, partitions, set_partitions, product_index, combination_index, permutation_index, powerset, random_product, random_permutation, random_combination, random_combination_with_replacement, nth_product, nth_permutation, nth_combination" Wrapping="always_iterable, always_reversible, countable, consumer, with_iter, iter_except" Others = "locate, rlocate, replace, numeric_range, side_effect, iterate, difference, make_decorator, SequenceView, time_limited, consume, tabulate, repeatfunc" names = 'grouping, look_ahead_behind, windowing, Augmenting, Combining, Summarizing, Selecting, Combinatorics, Wrapping, Others'.replace(',','').split() names recipes_in_more = [] recipes_in_more.extend([x for x in grouping.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in look_ahead_behind.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in windowing.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Augmenting.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Combining.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Summarizing.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Selecting.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Combinatorics.replace(",","").split() if x in recipes]) 
recipes_in_more.extend([x for x in Wrapping.replace(",","").split() if x in recipes]) recipes_in_more.extend([x for x in Others.replace(",","").split() if x in recipes]) recipes_in_more set(recipes)-set(recipes_in_more) for n in [x.strip() for x in grouping.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in look_ahead_behind.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in windowing.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Augmenting.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Combining.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Summarizing.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Selecting.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Combinatorics.split(',')]: print(n) help(mi.__dict__[n]) re.sub? import re worditer = re.sub(r'[\*,\.]','', "Slices with negative values require some caching of *iterable*, but this").lower().split() print(repr(worditer)) import random random.shuffle(worditer) print(repr(worditer)) from queue import i = [1,2,78, 'fred', ['nested', 'list', 1], 22.56] mi.random_combination_with_replacement(i, 5) for n in [x.strip() for x in Wrapping.split(',')]: print(n) help(mi.__dict__[n]) for n in [x.strip() for x in Others.split(',')]: print(n) help(mi.__dict__[n])
0.300643
0.567397
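Since the itertools notebook above notes that `pairwise` only appears in the standard library in Python 3.10, here are the `pairwise` and `sliding_window` recipes it refers to, adapted from the itertools recipes section; both run on older Pythons.

```python
from collections import deque
from itertools import islice, tee

def pairwise(iterable):
    "s -> (s0, s1), (s1, s2), (s2, s3), ...  (itertools.pairwise in 3.10+)"
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def sliding_window(iterable, n):
    "Yield overlapping length-n tuples: sliding_window('ABCDE', 3) -> ABC BCD CDE"
    it = iter(iterable)
    window = deque(islice(it, n), maxlen=n)
    if len(window) == n:
        yield tuple(window)
    for x in it:
        window.append(x)
        yield tuple(window)

print(list(pairwise("ABCD")))             # [('A', 'B'), ('B', 'C'), ('C', 'D')]
print(list(sliding_window(range(5), 3)))  # [(0, 1, 2), (1, 2, 3), (2, 3, 4)]
```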
### Technical Interview

define `find_missing_nums`; edge cases to clarify:
- empty list
- duplicated numbers or negative numbers
- start or end might not be in the list

#### Axes Mapping
- communication: ask questions
- verification: tricky problem with many edge cases to check against
- coding: built-in search methods and **brute force**
- problem-solving: ambiguity and complexity

#### Solutions:
1. Brute Force: time complexity is $O((end-start) \cdot len(arr))$, because `not in` uses a linear scan to iterate through the entire list; the same holds for similar built-ins in other languages: `includes, contains, indexOf, deleteCharAt`
2. Trade Space for Time: time is $O(n)$, where n = end-start; the extra space goes into the set (or hashmap) built from `arr`, which can be too much memory for large inputs. A set stores each item only once.
3. Binary Search: is the quadratic-style scan really the best we can do? Binary search can be done on a tree, but also on a sorted linear array, and it is much faster. With n = end-start and m = len(arr), time is $O(n\log(m))$ and space is $O(1)$.
4. Two Pointers

```
def find_missing_nums(ls, start, end):
    #ls = ls.sort()
    ls1 = list(range(start, end+1))
    ls2 = []
    for i in ls1:
        if i not in ls:
            ls2.append(i)
    # hyphens:
    return ls2

arr = [1,3,5,7,8,9,13]
find_missing_nums(arr, 5, 12)

# Brute Force
# O(end-start * len(arr)) -> O(N*M)
def print_missing_nums(arr, start, end):
    for n in range(start, end+1):
        # `not in` on a list is a linear scan -> O(len(arr)) per check
        if n not in arr:
            print(n)

# Trade Space for Time
def print_missing_nums(arr, start, end):
    arr_set = set(arr)  # set lookup is O(1)
    for n in range(start, end+1):
        if n not in arr_set:
            print(n)

print_missing_nums(arr, 5, 12)

# Binary Search
def binary_search(arr, target):
    l, r = 0, len(arr) - 1
    while l + 1 < r:
        mid = (l + r) // 2
        if arr[mid] == target:
            return True
        if arr[mid] < target:
            l = mid
        else:
            r = mid
    if arr[l] == target:
        return True
    if arr[r] == target:
        return True
    return False

def print_missing_nums(arr, start, end):
    for num in range(start, end + 1):
        if not binary_search(arr, num):
            print(num)

# Two Pointers
def printNumbers(start, end):
    if start == end:
        print(start)
    else:
        if start < 0:
            print("(" + str(start) + ")-", end = "")
        else:
            print(str(start) + "-", end = "")
        if end < 0:
            print("(" + str(end) + ")-", end = "")
        else:
            print(end)

def printRangeWithHyphens(nums, start, end):
    next = start
    for num in nums:
        if num < start:
            continue
        if num > end:
            printNumbers(next, end)
            return
        if num > next:
            printNumbers(next, num - 1)
        next = num + 1
    if next <= end:
        printNumbers(next, end)

printRangeWithHyphens(arr, 5, 12)
```

## Data Incubator Challenge
* **SP Tian**
* **July 25, 2019**

### Section 3:
#### Project working on: [link](http://api.deltaneutral.net/api/free?dt=2018-01-04&result=json)

### Section 1:
The City of Baltimore maintains a database of parking citations issued within the city. More information about the dataset can be found [here](https://data.baltimorecity.gov/Transportation/Parking-Citations/n4ma-fj3m). You can download the dataset as a CSV file here. Unless stated otherwise, you should only consider citations written before January 1, 2019.

### Section 2:
A knight $(a,b)$ may move on an $n*n$ grid by moving $a$ spaces horizontally and $b$ spaces vertically, or $b$ spaces horizontally and $a$ spaces vertically. In other words, a knight $(a,b)$ located at a space in the grid $(x,y)$ may move to a space $(x±a,y±b)$ or $(x±b,y±a)$. To answer the following questions, you will need to determine the shortest path a knight $(a,b)$ may take from space (0,0) in the upper-left corner to space $(n−1,n−1)$ in the lower-right corner on an $n*n$ grid.

```
def chess(a,b,x,y):
    pass
```

1. For $n=5$, how many moves are in the shortest path for knight $(1,2)$?
2. 
For $n=5$, what is the sum of the number of moves for the shortest paths for all knights with $a<=b$ ? 3. For $n=25$, how many knights with $0<a<=b$ cannot reach (24,24) ? 4. For $n=1000$, how many moves are in the shortest path for knight $(13,23)$ ? 5. For $n=5$, how many knights with $0<a<=b$ cannot reach (4,4)? 6. For $n=25$, how many moves are in the shortest path for knight $(4,7)$ ? 7. For $n=25$, what is the sum of the number of moves for the shortest paths for all knights with $a<=b$? 8. For $n=10000$, how many moves are in the shortest path for knight $(73,101)$ ? ``` import numpy as np import pandas as pd import matplotlib as plt filepath = "/Users/apple/Downloads/Parking_Citations.csv" df = pd.read_csv(filepath) df.shape df.columns df.head(10) def mean(series): try: total = sum(series) number = len(series) result = total / number except ZeroDivisionError as err: print(err) return result mean(df['ViolFine']) df['PoliceDistrict'].unique() parking = df.copy() df.loc[df['PoliceDistrict'] == 'NORTHERN', 'PoliceDistrict'] = 'Northern' df.loc[df['PoliceDistrict'] == 'SOUTHERN', 'PoliceDistrict'] = 'Southern' df.loc[df['PoliceDistrict'] == 'SOUTHWESTERN', 'PoliceDistrict'] = 'Southwestern' df.loc[df['PoliceDistrict'] == 'SOUTHEASTERN', 'PoliceDistrict'] = 'Southeastern' df.loc[df['PoliceDistrict'] == 'NORTHWESTERN', 'PoliceDistrict'] = 'Northwestern' df.loc[df['PoliceDistrict'] == 'CENTRAL', 'PoliceDistrict'] = 'Central' df.loc[df['PoliceDistrict'] == 'WESTERN', 'PoliceDistrict'] = 'Western' df.loc[df['PoliceDistrict'] == 'EASTERN', 'PoliceDistrict'] = 'Eastern' df.loc[df['PoliceDistrict'] == 'NORTHEASTERN', 'PoliceDistrict'] = 'Notheastern' df['PoliceDistrict'].unique() mean(df.loc[df['PoliceDistrict'] == 'Northern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southwestern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southeastern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Central', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Western', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Eastern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Northwestern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Notheastern', 'ViolFine']) len(df.loc[df['PoliceDistrict'] == 'Notheastern', 'ViolFine']) len(parking.loc[parking['PoliceDistrict'] == 'NORTHEASTERN']) len(parking.loc[parking['PoliceDistrict'] == 'Notheastern']) from datetime import datetime #index = pd.date_range(2004, 2014, freq = 'A') #index = pd.period_range(2004, 2014, freq = 'A') df['ViolDate'] = pd.to_datetime(df['ViolDate']) df = df.set_index('ViolDate') df from sklearn.linear_model import LinearRegression for i in range(2004, 2015): df[i] = len(df[str(i)]) x = np.arange(2004,2015).reshape(-1,1) y = np.array([s for s in df.values()]) model = LinearRegression() model.fit(x, y) print('slope:', model.coef_) open_fee = df.loc[df['OpenPenalty'] != 0] open_fee.quantile([.81], axis = 0) def sort_make(make=[]): x = (df['Make'].unique()) for i in x: make.append(str(i)) make.sort() return make len(make) == len(set(make)) sort_make() # Sorry, I'm really ignorant of car brands and had no approach of distuishing and cleaning the 'Make'. ``` 1. Determine how many instances of auto theft ocurred in each police district during 2015. 2. Determine the number of parking citations that were issued in each police district during the same year. 3. Finally, determine the ratio of auto thefts to parking citations for each district. 
Out of the nine police districts, what was the **highest ratio**? ``` crime = pd.read_csv("/Users/apple/Downloads/BPD_Part_1_Victim_Based_Crime_Data.csv") crime.shape crime.head(10) parking.head(10) crime['District'].unique() dist = crime['PoliceDistrict'] == "Northwestern" crime.loc[crime['PoliceDistrict'] == 'Northwestern', 'ViolFine'] sum(crime.loc[crime['District'] == 'EASTERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'WESTERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHWEST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHWEST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'CENTRAL', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHEAST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHEAST', 'Total Incidents']) ```
github_jupyter
def find_missing_nums(ls, start, end): #ls = ls.sort() ls1 = list(range(start, end+1)) ls2 = [] for i in ls1: if i not in ls: ls2.append(i) # hyphens: return ls2 arr = [1,3,5,7,8,9,13] find_missing_nums(arr, 5, 12) # Brute Force # O(end-start * len(arr)) -> O(N*M) def print_missing_nums(arr, start, end): for n in range(start, end+1): # if n not in arr: print(n) # Trade Space for Time def print_missing_nums(arr, start, end): arr_set = set(arr) # set lookup is O(1) for n in range(start, end+1): if n not in arr: print(n) print_missing_nums(arr, 5, 12) # Binary Search def binary_search(arr, target): 1 , r = 0, len(arr) - 1 while l + 1 < r: mid = (1+r) // 2 if arr[mid] == target: return True if arr[mid] < target: 1 = mid else: r = mid if arr[1] == target: return True if arr[r] == target: return True return False def print_missing_nums(arr, start, end): for num in range(start, end + 1): if not binary_search(arr, num): print(num) # Two Pointers def printNumbers(start, end): if start == end: print(start) else: if start < 0: print("(" + str(start) + ")-", end = "") else: print(str(start) + "-", end = "") if end < 0: print("(" + str(end) + ")-", end = "") else: print(end) def printRangeWithHyphens(nums, start, end): next = start for num in nums: if num < start: continue if num > end: printNumbers(next, end) return if num > next: printNumbers(next, num - 1) next = num + 1 if next <= end: printNumbers(next, end) printRangeWithHyphens(arr, 5, 12) def chess(a,b,x,y): pass import numpy as np import pandas as pd import matplotlib as plt filepath = "/Users/apple/Downloads/Parking_Citations.csv" df = pd.read_csv(filepath) df.shape df.columns df.head(10) def mean(series): try: total = sum(series) number = len(series) result = total / number except ZeroDivisionError as err: print(err) return result mean(df['ViolFine']) df['PoliceDistrict'].unique() parking = df.copy() df.loc[df['PoliceDistrict'] == 'NORTHERN', 'PoliceDistrict'] = 'Northern' df.loc[df['PoliceDistrict'] == 'SOUTHERN', 'PoliceDistrict'] = 'Southern' df.loc[df['PoliceDistrict'] == 'SOUTHWESTERN', 'PoliceDistrict'] = 'Southwestern' df.loc[df['PoliceDistrict'] == 'SOUTHEASTERN', 'PoliceDistrict'] = 'Southeastern' df.loc[df['PoliceDistrict'] == 'NORTHWESTERN', 'PoliceDistrict'] = 'Northwestern' df.loc[df['PoliceDistrict'] == 'CENTRAL', 'PoliceDistrict'] = 'Central' df.loc[df['PoliceDistrict'] == 'WESTERN', 'PoliceDistrict'] = 'Western' df.loc[df['PoliceDistrict'] == 'EASTERN', 'PoliceDistrict'] = 'Eastern' df.loc[df['PoliceDistrict'] == 'NORTHEASTERN', 'PoliceDistrict'] = 'Notheastern' df['PoliceDistrict'].unique() mean(df.loc[df['PoliceDistrict'] == 'Northern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southwestern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Southeastern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Central', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Western', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Eastern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Northwestern', 'ViolFine']) mean(df.loc[df['PoliceDistrict'] == 'Notheastern', 'ViolFine']) len(df.loc[df['PoliceDistrict'] == 'Notheastern', 'ViolFine']) len(parking.loc[parking['PoliceDistrict'] == 'NORTHEASTERN']) len(parking.loc[parking['PoliceDistrict'] == 'Notheastern']) from datetime import datetime #index = pd.date_range(2004, 2014, freq = 'A') #index = pd.period_range(2004, 2014, freq = 'A') df['ViolDate'] = pd.to_datetime(df['ViolDate']) df = df.set_index('ViolDate') 
df from sklearn.linear_model import LinearRegression for i in range(2004, 2015): df[i] = len(df[str(i)]) x = np.arange(2004,2015).reshape(-1,1) y = np.array([s for s in df.values()]) model = LinearRegression() model.fit(x, y) print('slope:', model.coef_) open_fee = df.loc[df['OpenPenalty'] != 0] open_fee.quantile([.81], axis = 0) def sort_make(make=[]): x = (df['Make'].unique()) for i in x: make.append(str(i)) make.sort() return make len(make) == len(set(make)) sort_make() # Sorry, I'm really ignorant of car brands and had no approach of distuishing and cleaning the 'Make'. crime = pd.read_csv("/Users/apple/Downloads/BPD_Part_1_Victim_Based_Crime_Data.csv") crime.shape crime.head(10) parking.head(10) crime['District'].unique() dist = crime['PoliceDistrict'] == "Northwestern" crime.loc[crime['PoliceDistrict'] == 'Northwestern', 'ViolFine'] sum(crime.loc[crime['District'] == 'EASTERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'WESTERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHERN', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHWEST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHWEST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'CENTRAL', 'Total Incidents']) sum(crime.loc[crime['District'] == 'NORTHEAST', 'Total Incidents']) sum(crime.loc[crime['District'] == 'SOUTHEAST', 'Total Incidents'])
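# The knight questions above are only answered by the empty stub chess(a, b, x, y).
# The sketch below is NOT the notebook author's solution; it is one standard
# breadth-first search, assuming the questions mean a generalised (a, b)-knight that
# starts at (0, 0) and tries to reach (n-1, n-1), as questions 3 and 5 suggest.
from collections import deque

def knight_moves(n, a, b, start=(0, 0), goal=None):
    """Minimum number of (a, b)-knight moves from start to goal on an n x n board.

    An (a, b)-knight jumps (+/-a, +/-b) or (+/-b, +/-a). Returns -1 when the goal
    cannot be reached (which is what the 'cannot reach' questions count).
    """
    if goal is None:
        goal = (n - 1, n - 1)
    moves = {(a, b), (b, a), (-a, b), (-b, a), (a, -b), (b, -a), (-a, -b), (-b, -a)}
    seen = {start}
    queue = deque([(start, 0)])
    while queue:
        (x, y), dist = queue.popleft()
        if (x, y) == goal:
            return dist
        for dx, dy in moves:
            nx, ny = x + dx, y + dy
            if 0 <= nx < n and 0 <= ny < n and (nx, ny) not in seen:
                seen.add((nx, ny))
                queue.append(((nx, ny), dist + 1))
    return -1

# e.g. question 6: shortest path for the (4, 7)-knight on a 25 x 25 board
print(knight_moves(25, 4, 7))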
0.209227
0.845496
# Custom PCA and t-SNE Note the large similarity with the `projector` notebook! ## Dependencies Using the scikit-learn library for the plot-creation. ``` from __future__ import print_function import os import time import numpy as np import pandas as pd from sklearn.decomposition import PCA from sklearn.manifold import TSNE %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import seaborn as sns import matplotlib.gridspec as gridspec ``` ## Data ### Hyperparameters ``` LOG_DIR = './population_backup/storage/experiment6/' topology_id = 3 overwrite = True rm_duplicates = False # Remove all samples with at least 'dup_columns' duplicate values dup_columns = None # Number of duplicated columns a single sample has before removal filter_score = True # Filter out samples having a fitness of min_fitness or more classify = True # Classify the samples based on the connections if topology_id in [2,3]: min_fitness = 1 # GRU is capable of finding all targets elif topology_id in [22]: min_fitness = 0.5 # Best score of 11/18 else: raise Exception(f"Add topology ID '{topology_id}'") ``` ### Fetch ``` # Setup the header head = [] if topology_id in [1, 2, 3]: # GRU populations head += ['bias_r', 'bias_z', 'bias_h', 'weight_xr', 'weight_xz', 'weight_xh', 'weight_hr', 'weight_hz', 'weight_hh'] elif topology_id in [22, 33]: # SRU populations head += ['bias_h', 'weight_xh', 'weight_hh'] else: raise Exception(f"Topology ID '{topology_id}' not supported!") if topology_id in [1]: head += ['conn1', 'conn2'] elif topology_id in [2, 22]: head += ['bias_rw', 'conn2'] elif topology_id in [3, 33]: head += ['bias_rw', 'conn0', 'conn1', 'conn2'] else: raise Exception(f"Topology ID '{topology_id}' not supported!") # Check if tsv files already exist raw_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/topology_{topology_id}.csv') data_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/data.tsv') meta_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/meta.tsv') # Load in the data (without header) if not overwrite and os.path.exists(data_path): data = np.genfromtxt(data_path, delimiter='\t') meta = np.genfromtxt(meta_path, delimiter='\t') else: raw = np.genfromtxt(raw_path, delimiter=',')[1:] data = raw[:,:-1] meta = raw[:,-1] np.savetxt(data_path, data, delimiter='\t') np.savetxt(meta_path, meta, delimiter='\t') # Print shape: print(f"Data shape: {data.shape}") print(f"Meta shape: {meta.shape}") # Transform to pandas dataframe (easier to manipulate) data_pd = pd.DataFrame(data, columns=head) meta_pd = pd.DataFrame(meta, columns=['fitness']) data_pd.head() ``` ### Filter the data ``` # Filter out the complete duplicates indices = data_pd.duplicated() data_pd = data_pd[~indices.values] meta_pd = meta_pd[~indices.values] print(f"Data shape: {data_pd.shape}") print(f"Meta shape: {meta_pd.shape}") # For example, if you want to see only fitnesses of 1 (perfect score). 
if filter_score: indices = meta_pd >= min_fitness data_pd = data_pd[indices.values] meta_pd = meta_pd[indices.values] print(f"Data shape: {data_pd.shape}") print(f"Meta shape: {meta_pd.shape}") # Filter out all the samples that have at least one duplicate value (in each of its columns) if rm_duplicates: indices = (meta_pd<0).astype(int).values.flatten() # Little hack for h in head[1:]: indices += data_pd.duplicated(subset=h).astype(int).values # Remove all that exceed the set threshold data_pd = data_pd[indices < dup_columns] meta_pd = meta_pd[indices < dup_columns] print(f"Dropping duplicates that occur in {dup_columns} columns or more") print(f" > Data shape: {data_pd.shape}") print(f" > Meta shape: {meta_pd.shape}") ``` ### Visualize the data ``` plt.figure(figsize=(15,3)) data_pd.boxplot() plt.show() plt.close() def adapt_and_show(data, indices=None): data_temp = data if indices is not None: data_temp = data_temp[indices.values] print(f"Size: {data_temp.shape}") plt.figure(figsize=(15,5)) for i, h in enumerate(head): plt.subplot(int(len(head)/6+1),6,i+1) sns.violinplot(data_temp[h]) plt.title(h) if 'bias' in h: plt.xlim(-3,3) else: plt.xlim(-6,6) plt.yticks([]) plt.tight_layout() plt.show() plt.close() # indices = (data_pd['conn1'] >= 3) & (data_pd['conn0'] >= 3) indices = None adapt_and_show(data_pd, indices) # PP indices = (data_pd['conn1'] >= 0) & (data_pd['conn0'] >= 0) adapt_and_show(data_pd, indices) # PN indices = (data_pd['conn1'] >= 0) & (data_pd['conn0'] < 0) adapt_and_show(data_pd, indices) # NP indices = (data_pd['conn1'] < 0) & (data_pd['conn0'] >= 0) adapt_and_show(data_pd, indices) # NN indices = (data_pd['conn1'] < 0) & (data_pd['conn0'] < 0) adapt_and_show(data_pd, indices) h = 'conn1' t = '$c_2$' COLORS = ['#ffffff', sns.color_palette()[0], sns.color_palette()[1], sns.color_palette()[2], sns.color_palette()[3]] plt.figure(figsize=(3,3)) gs1 = gridspec.GridSpec(5,1) gs1.update(wspace=0, hspace=0) for i in sorted([i for i in range(5)], reverse=True): plt.subplot(gs1[i]) if i == 0: plt.title(t) # Format data if i == 0: data_temp = data_pd elif i == 1: indices = (data_pd['conn0'] >= 0) & (data_pd['conn1'] >= 0) data_temp = data_pd[indices.values] elif i == 2: indices = (data_pd['conn0'] >= 0) & (data_pd['conn1'] < 0) data_temp = data_pd[indices.values] elif i == 3: indices = (data_pd['conn0'] < 0) & (data_pd['conn1'] >= 0) data_temp = data_pd[indices.values] else: indices = (data_pd['conn0'] < 0) & (data_pd['conn1'] < 0) data_temp = data_pd[indices.values] # Create the plot sns.violinplot(data_temp[h], color=COLORS[i]) if 'bias' in h: plt.xlim(-3,3) else: plt.xlim(-6,6) plt.yticks([]) plt.xlabel('') if i < 4: plt.gca().set_xticklabels([]) plt.savefig(f"delete_me/{h}.png", bbox_inches='tight', pad_inches=0.02) plt.savefig(f"delete_me/{h}.eps", format='eps', bbox_inches='tight', pad_inches=0.02) plt.show() plt.close() ``` ### Add column Used to color the plots. 
``` def classify_connections(r): if r['conn0'] >= 0 and r['conn1'] >= 0: return "$c_1 \geq 0 , c_2 \geq 0$" # Positive Positive elif r['conn0'] >= 0 and r['conn1'] < 0: return "$c_1 \geq 0 , c_2 < 0$" # Positive Negative elif r['conn0'] < 0 and r['conn1'] >= 0: return "$c_1 < 0 , c_2 \geq 0$" # Negative Positive else: return "$c_1 < 0 , c_2 < 0$" # Negative Negative if classify: data_pd['classes'] = data_pd.apply(lambda row: classify_connections(row), axis=1).values data_pd.head() ``` ## PCA ``` pca = PCA(n_components=3) pca_result = pca.fit_transform(data_pd[head].values) data_pd['pca-one'] = pca_result[:,0] data_pd['pca-two'] = pca_result[:,1] data_pd['pca-three'] = pca_result[:,2] print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_)) plt.figure(figsize=(5,5)) sns.scatterplot( x="pca-one", y="pca-two", hue="classes", palette="tab10", data=data_pd, ) plt.tight_layout() legend_elements = [ Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 \geq 0 , c_2 \geq 0$", markerfacecolor=COLORS[1], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 \geq 0 , c_2 < 0$", markerfacecolor=COLORS[2], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 < 0 , c_2 \geq 0$", markerfacecolor=COLORS[3], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 < 0 , c_2 < 0$", markerfacecolor=COLORS[4], markersize=10), ] plt.legend(handles=legend_elements, loc='center left', title=None, bbox_to_anchor=(1, 0.5), fancybox=True, fontsize=10, ncol=1) # plt.axhline(0, linewidth=0.5, color=(0.5,0.5,0.5,0.5)) # plt.axvline(0, linewidth=0.5, color=(0.5,0.5,0.5,0.5)) plt.xlim(-8,8) plt.ylim(-8,8) plt.xlabel("First principal component") plt.ylabel("Second principal component") plt.savefig(f"delete_me/pca.png", bbox_inches='tight', pad_inches=0.02) plt.savefig(f"delete_me/pca.eps", format='eps', bbox_inches='tight', pad_inches=0.02) plt.show() plt.close() # ax = plt.figure(figsize=(16,10)).gca(projection='3d') # ax.scatter( # xs=df["pca-one"], # ys=df["pca-two"], # zs=df["pca-three"], # # xs=df.loc[rndperm,:]["pca-one"], # # ys=df.loc[rndperm,:]["pca-two"], # # zs=df.loc[rndperm,:]["pca-three"], # # cmap='tab10', # # c=[int(i) for i in df.loc[rndperm,:]["y"]], # c=[(1, 1-5*(1-min(i,1)), 0, min(i,1)) for i in df["finished"]], # ) # ax.set_xlabel('pca-one') # ax.set_ylabel('pca-two') # ax.set_zlabel('pca-three') # plt.show() ``` ## t-SNE ``` df_values = data_pd[head].values pca = PCA(n_components=3) pca_result = pca.fit_transform(df_values) data_pd['pca-one'] = pca_result[:,0] data_pd['pca-two'] = pca_result[:,1] data_pd['pca-three'] = pca_result[:,2] print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_)) time_start = time.time() tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300) tsne_results = tsne.fit_transform(df_values) print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start)) data_pd['tsne-2d-one'] = tsne_results[:,0] data_pd['tsne-2d-two'] = tsne_results[:,1] plt.figure(figsize=(6,6)) sns.scatterplot( x="tsne-2d-one", y="tsne-2d-two", hue="classes", data=data_pd, ) plt.tight_layout() leg = plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.07), fancybox=True, fontsize=10, ncol=5) plt.show() plt.close() # time_start = time.time() # tsne = TSNE(n_components=3, verbose=1, perplexity=40, n_iter=300) # tsne_results = tsne.fit_transform(df_values) # print('t-SNE done! 
Time elapsed: {} seconds'.format(time.time()-time_start)) # df['tsne-3d-one'] = tsne_results[:,0] # df['tsne-3d-two'] = tsne_results[:,1] # df['tsne-3d-three'] = tsne_results[:,2] # ax = plt.figure(figsize=(16,10)).gca(projection='3d') # ax.scatter( # xs=df["tsne-3d-one"], # ys=df["tsne-3d-two"], # zs=df["tsne-3d-three"], # # xs=df.loc[rndperm,:]["pca-one"], # # ys=df.loc[rndperm,:]["pca-two"], # # zs=df.loc[rndperm,:]["pca-three"], # # cmap='tab10', # # c=[int(i) for i in df.loc[rndperm,:]["y"]], # c=[(1, 1-5*(1-min(i,1)), 0, min(i,1)) for i in df["finished"]], # ) # ax.set_xlabel('tsne-3d-one') # ax.set_ylabel('tsne-3d-two') # ax.set_zlabel('tsne-3d-three') # plt.show() ```
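Two side notes on the cells above, neither of which is part of the original notebook. First, the PCA scatter-plot legend builds `Line2D` handles, so it also needs `from matplotlib.lines import Line2D`, which is not imported above. Second, because the parameters live on visibly different scales in the violin plots (biases shown on roughly [-3, 3], weights on [-6, 6]), it can be worth standardizing them before PCA; the sketch below is that optional variation, assuming `data_pd` and `head` as defined earlier.

```
from matplotlib.lines import Line2D            # needed by the legend handles above
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

# Optional variation: z-score each parameter so the wider-ranging weights do not
# dominate the principal components.
scaled = StandardScaler().fit_transform(data_pd[head].values)
pca_scaled = PCA(n_components=3)
pca_scaled.fit(scaled)
print('Explained variance (scaled features):', pca_scaled.explained_variance_ratio_)
```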
github_jupyter
from __future__ import print_function import os import time import numpy as np import pandas as pd from sklearn.decomposition import PCA from sklearn.manifold import TSNE %matplotlib inline import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import seaborn as sns import matplotlib.gridspec as gridspec LOG_DIR = './population_backup/storage/experiment6/' topology_id = 3 overwrite = True rm_duplicates = False # Remove all samples with at least 'dup_columns' duplicate values dup_columns = None # Number of duplicated columns a single sample has before removal filter_score = True # Filter out samples having a fitness of min_fitness or more classify = True # Classify the samples based on the connections if topology_id in [2,3]: min_fitness = 1 # GRU is capable of finding all targets elif topology_id in [22]: min_fitness = 0.5 # Best score of 11/18 else: raise Exception(f"Add topology ID '{topology_id}'") # Setup the header head = [] if topology_id in [1, 2, 3]: # GRU populations head += ['bias_r', 'bias_z', 'bias_h', 'weight_xr', 'weight_xz', 'weight_xh', 'weight_hr', 'weight_hz', 'weight_hh'] elif topology_id in [22, 33]: # SRU populations head += ['bias_h', 'weight_xh', 'weight_hh'] else: raise Exception(f"Topology ID '{topology_id}' not supported!") if topology_id in [1]: head += ['conn1', 'conn2'] elif topology_id in [2, 22]: head += ['bias_rw', 'conn2'] elif topology_id in [3, 33]: head += ['bias_rw', 'conn0', 'conn1', 'conn2'] else: raise Exception(f"Topology ID '{topology_id}' not supported!") # Check if tsv files already exist raw_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/topology_{topology_id}.csv') data_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/data.tsv') meta_path = os.path.join(LOG_DIR, f'topology_{topology_id}/data/meta.tsv') # Load in the data (without header) if not overwrite and os.path.exists(data_path): data = np.genfromtxt(data_path, delimiter='\t') meta = np.genfromtxt(meta_path, delimiter='\t') else: raw = np.genfromtxt(raw_path, delimiter=',')[1:] data = raw[:,:-1] meta = raw[:,-1] np.savetxt(data_path, data, delimiter='\t') np.savetxt(meta_path, meta, delimiter='\t') # Print shape: print(f"Data shape: {data.shape}") print(f"Meta shape: {meta.shape}") # Transform to pandas dataframe (easier to manipulate) data_pd = pd.DataFrame(data, columns=head) meta_pd = pd.DataFrame(meta, columns=['fitness']) data_pd.head() # Filter out the complete duplicates indices = data_pd.duplicated() data_pd = data_pd[~indices.values] meta_pd = meta_pd[~indices.values] print(f"Data shape: {data_pd.shape}") print(f"Meta shape: {meta_pd.shape}") # For example, if you want to see only fitnesses of 1 (perfect score). 
if filter_score: indices = meta_pd >= min_fitness data_pd = data_pd[indices.values] meta_pd = meta_pd[indices.values] print(f"Data shape: {data_pd.shape}") print(f"Meta shape: {meta_pd.shape}") # Filter out all the samples that have at least one duplicate value (in each of its columns) if rm_duplicates: indices = (meta_pd<0).astype(int).values.flatten() # Little hack for h in head[1:]: indices += data_pd.duplicated(subset=h).astype(int).values # Remove all that exceed the set threshold data_pd = data_pd[indices < dup_columns] meta_pd = meta_pd[indices < dup_columns] print(f"Dropping duplicates that occur in {dup_columns} columns or more") print(f" > Data shape: {data_pd.shape}") print(f" > Meta shape: {meta_pd.shape}") plt.figure(figsize=(15,3)) data_pd.boxplot() plt.show() plt.close() def adapt_and_show(data, indices=None): data_temp = data if indices is not None: data_temp = data_temp[indices.values] print(f"Size: {data_temp.shape}") plt.figure(figsize=(15,5)) for i, h in enumerate(head): plt.subplot(int(len(head)/6+1),6,i+1) sns.violinplot(data_temp[h]) plt.title(h) if 'bias' in h: plt.xlim(-3,3) else: plt.xlim(-6,6) plt.yticks([]) plt.tight_layout() plt.show() plt.close() # indices = (data_pd['conn1'] >= 3) & (data_pd['conn0'] >= 3) indices = None adapt_and_show(data_pd, indices) # PP indices = (data_pd['conn1'] >= 0) & (data_pd['conn0'] >= 0) adapt_and_show(data_pd, indices) # PN indices = (data_pd['conn1'] >= 0) & (data_pd['conn0'] < 0) adapt_and_show(data_pd, indices) # NP indices = (data_pd['conn1'] < 0) & (data_pd['conn0'] >= 0) adapt_and_show(data_pd, indices) # NN indices = (data_pd['conn1'] < 0) & (data_pd['conn0'] < 0) adapt_and_show(data_pd, indices) h = 'conn1' t = '$c_2$' COLORS = ['#ffffff', sns.color_palette()[0], sns.color_palette()[1], sns.color_palette()[2], sns.color_palette()[3]] plt.figure(figsize=(3,3)) gs1 = gridspec.GridSpec(5,1) gs1.update(wspace=0, hspace=0) for i in sorted([i for i in range(5)], reverse=True): plt.subplot(gs1[i]) if i == 0: plt.title(t) # Format data if i == 0: data_temp = data_pd elif i == 1: indices = (data_pd['conn0'] >= 0) & (data_pd['conn1'] >= 0) data_temp = data_pd[indices.values] elif i == 2: indices = (data_pd['conn0'] >= 0) & (data_pd['conn1'] < 0) data_temp = data_pd[indices.values] elif i == 3: indices = (data_pd['conn0'] < 0) & (data_pd['conn1'] >= 0) data_temp = data_pd[indices.values] else: indices = (data_pd['conn0'] < 0) & (data_pd['conn1'] < 0) data_temp = data_pd[indices.values] # Create the plot sns.violinplot(data_temp[h], color=COLORS[i]) if 'bias' in h: plt.xlim(-3,3) else: plt.xlim(-6,6) plt.yticks([]) plt.xlabel('') if i < 4: plt.gca().set_xticklabels([]) plt.savefig(f"delete_me/{h}.png", bbox_inches='tight', pad_inches=0.02) plt.savefig(f"delete_me/{h}.eps", format='eps', bbox_inches='tight', pad_inches=0.02) plt.show() plt.close() def classify_connections(r): if r['conn0'] >= 0 and r['conn1'] >= 0: return "$c_1 \geq 0 , c_2 \geq 0$" # Positive Positive elif r['conn0'] >= 0 and r['conn1'] < 0: return "$c_1 \geq 0 , c_2 < 0$" # Positive Negative elif r['conn0'] < 0 and r['conn1'] >= 0: return "$c_1 < 0 , c_2 \geq 0$" # Negative Positive else: return "$c_1 < 0 , c_2 < 0$" # Negative Negative if classify: data_pd['classes'] = data_pd.apply(lambda row: classify_connections(row), axis=1).values data_pd.head() pca = PCA(n_components=3) pca_result = pca.fit_transform(data_pd[head].values) data_pd['pca-one'] = pca_result[:,0] data_pd['pca-two'] = pca_result[:,1] data_pd['pca-three'] = pca_result[:,2] print('Explained 
variation per principal component: {}'.format(pca.explained_variance_ratio_)) plt.figure(figsize=(5,5)) sns.scatterplot( x="pca-one", y="pca-two", hue="classes", palette="tab10", data=data_pd, ) plt.tight_layout() legend_elements = [ Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 \geq 0 , c_2 \geq 0$", markerfacecolor=COLORS[1], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 \geq 0 , c_2 < 0$", markerfacecolor=COLORS[2], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 < 0 , c_2 \geq 0$", markerfacecolor=COLORS[3], markersize=10), Line2D([0], [0], marker='o', color=COLORS[0], label="$c_1 < 0 , c_2 < 0$", markerfacecolor=COLORS[4], markersize=10), ] plt.legend(handles=legend_elements, loc='center left', title=None, bbox_to_anchor=(1, 0.5), fancybox=True, fontsize=10, ncol=1) # plt.axhline(0, linewidth=0.5, color=(0.5,0.5,0.5,0.5)) # plt.axvline(0, linewidth=0.5, color=(0.5,0.5,0.5,0.5)) plt.xlim(-8,8) plt.ylim(-8,8) plt.xlabel("First principal component") plt.ylabel("Second principal component") plt.savefig(f"delete_me/pca.png", bbox_inches='tight', pad_inches=0.02) plt.savefig(f"delete_me/pca.eps", format='eps', bbox_inches='tight', pad_inches=0.02) plt.show() plt.close() # ax = plt.figure(figsize=(16,10)).gca(projection='3d') # ax.scatter( # xs=df["pca-one"], # ys=df["pca-two"], # zs=df["pca-three"], # # xs=df.loc[rndperm,:]["pca-one"], # # ys=df.loc[rndperm,:]["pca-two"], # # zs=df.loc[rndperm,:]["pca-three"], # # cmap='tab10', # # c=[int(i) for i in df.loc[rndperm,:]["y"]], # c=[(1, 1-5*(1-min(i,1)), 0, min(i,1)) for i in df["finished"]], # ) # ax.set_xlabel('pca-one') # ax.set_ylabel('pca-two') # ax.set_zlabel('pca-three') # plt.show() df_values = data_pd[head].values pca = PCA(n_components=3) pca_result = pca.fit_transform(df_values) data_pd['pca-one'] = pca_result[:,0] data_pd['pca-two'] = pca_result[:,1] data_pd['pca-three'] = pca_result[:,2] print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_)) time_start = time.time() tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300) tsne_results = tsne.fit_transform(df_values) print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start)) data_pd['tsne-2d-one'] = tsne_results[:,0] data_pd['tsne-2d-two'] = tsne_results[:,1] plt.figure(figsize=(6,6)) sns.scatterplot( x="tsne-2d-one", y="tsne-2d-two", hue="classes", data=data_pd, ) plt.tight_layout() leg = plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.07), fancybox=True, fontsize=10, ncol=5) plt.show() plt.close() # time_start = time.time() # tsne = TSNE(n_components=3, verbose=1, perplexity=40, n_iter=300) # tsne_results = tsne.fit_transform(df_values) # print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start)) # df['tsne-3d-one'] = tsne_results[:,0] # df['tsne-3d-two'] = tsne_results[:,1] # df['tsne-3d-three'] = tsne_results[:,2] # ax = plt.figure(figsize=(16,10)).gca(projection='3d') # ax.scatter( # xs=df["tsne-3d-one"], # ys=df["tsne-3d-two"], # zs=df["tsne-3d-three"], # # xs=df.loc[rndperm,:]["pca-one"], # # ys=df.loc[rndperm,:]["pca-two"], # # zs=df.loc[rndperm,:]["pca-three"], # # cmap='tab10', # # c=[int(i) for i in df.loc[rndperm,:]["y"]], # c=[(1, 1-5*(1-min(i,1)), 0, min(i,1)) for i in df["finished"]], # ) # ax.set_xlabel('tsne-3d-one') # ax.set_ylabel('tsne-3d-two') # ax.set_zlabel('tsne-3d-three') # plt.show()
0.548553
0.802594
# Bit Reader

Encode/Decode a set of bits

Initializes with parameter `options`, which must be a dictionary with the following format:

- keys must be a str with the bit places, example: '0-1' means bit 0 and bit 1
- values must be a dictionary with the bit value as the key and the category (str) as value. Categories must be unique.
- Encode: given a category/categories return a list of possible values
- Decode: given a value return a list of categories

## Example

MOD09 (http://modis-sr.ltdri.org/guide/MOD09_UserGuide_v1_3.pdf) (page 28, state1km, 16 bits):

```
from geetools import bitreader

options = {
    '0-1': {0:'clear', 1:'cloud', 2:'mix'}, # cloud state
    '2-2': {0: 'no_shadow', 1:'shadow'},    # cloud shadow (bit 0 is not needed)
    '6-7': {0:'climatology', 1:'low', 2:'average', 3:'high'} # land/water flag
}
reader = bitreader.BitReader(options, 16)
```

Internally it computes a dict with

- bit_length (length of the group of bits)
- lshift (left shift)
- shifted (shifted places)

```
reader.info
print('bit length', reader.bit_length)
```

DECODE ONE VALUE

```
value = 204
bits = reader.get_bin(value)
print('204:', bits)
reader.decode(204)
```

MATCH ONE VALUE

```
reader.match(204, 'cloud')
reader.match(204, 'shadow')
```

ENCODE A VALUE (EXCLUSIVELY)

In this case, shadow is 00000100 (4) and **not** 00000101 (5)

```
reader.encode('shadow')
reader.encode('clear')
reader.encode('no_shadow')
```

ENCODE A VALUE (ALL)

This will get **all** values (all combinations where the bit is set)

```
print(reader.encode_one('shadow')[0:100])
print(reader.encode_one('cloud')[0:100])
```

ENCODE AND

```
print(reader.encode_and('cloud', 'shadow')[0:100])
```

### DECODE AN IMAGE

```
import ee
from geetools import ui, cloud_mask
Map = ui.Map(tabs=['Inspector', 'Layers'])
Map.show()
modcol = ee.ImageCollection('MODIS/006/MOD09GA').sort('system:time_start', False)
mod = ee.Image(modcol.first())
```

BANDS

```
red = 'sur_refl_b01'
green = 'sur_refl_b04'
blue = 'sur_refl_b03'
qa = 'state_1km'
qa_mask = mod.select(qa)
Map.addLayer(mod, {'bands':[red, green, blue], 'min':0, 'max':5000}, 'Original')
Map.addLayer(qa_mask, {'min':0, 'max':reader.max}, 'QA')
```

APPLY THE `BitReader` TO THE BAND THAT HOLDS THE BIT INFORMATION

```
mask = reader.decode_image(qa, mod)
Map.addLayer(mask.select(['cloud']), {'min':0, 'max':1}, 'Clouds')
```

`BitReader` INFORMATION FOR KNOWN COLLECTIONS AVAILABLE IN `geetools.cloud_mask` MODULE

```
from geetools import cloud_mask
state1km = cloud_mask.BITS_MODIS09GA
state1km
```
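To make the bit bookkeeping concrete, here is a small pure-Python sketch, independent of `geetools`, of how a value such as 204 can be decoded against the `options` dictionary above by shifting and masking each declared bit group. It illustrates the idea only and is not the actual `BitReader` implementation.

```
def decode_value(options, value):
    """Shift `value` down to each group's first bit, mask off bit_length bits,
    and look up the category for the resulting group value."""
    categories = []
    for bit_range, cats in options.items():
        first, last = (int(b) for b in bit_range.split('-'))
        bit_length = last - first + 1
        group_value = (value >> first) & ((1 << bit_length) - 1)
        if group_value in cats:
            categories.append(cats[group_value])
    return categories

options = {
    '0-1': {0: 'clear', 1: 'cloud', 2: 'mix'},
    '2-2': {0: 'no_shadow', 1: 'shadow'},
    '6-7': {0: 'climatology', 1: 'low', 2: 'average', 3: 'high'},
}
# 204 is 0b11001100: bits 0-1 -> 0, bit 2 -> 1, bits 6-7 -> 3
print(decode_value(options, 204))  # ['clear', 'shadow', 'high']
```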
github_jupyter
from geetools import bitreader
options = {
    '0-1': {0:'clear', 1:'cloud', 2:'mix'}, # cloud state
    '2-2': {0: 'no_shadow', 1:'shadow'},    # cloud shadow (bit 0 is not needed)
    '6-7': {0:'climatology', 1:'low', 2:'average', 3:'high'} # land/water flag
}
reader = bitreader.BitReader(options, 16)
reader.info
print('bit length', reader.bit_length)
value = 204
bits = reader.get_bin(value)
print('204:', bits)
reader.decode(204)
reader.match(204, 'cloud')
reader.match(204, 'shadow')
reader.encode('shadow')
reader.encode('clear')
reader.encode('no_shadow')
print(reader.encode_one('shadow')[0:100])
print(reader.encode_one('cloud')[0:100])
print(reader.encode_and('cloud', 'shadow')[0:100])
import ee
from geetools import ui, cloud_mask
Map = ui.Map(tabs=['Inspector', 'Layers'])
Map.show()
modcol = ee.ImageCollection('MODIS/006/MOD09GA').sort('system:time_start', False)
mod = ee.Image(modcol.first())
red = 'sur_refl_b01'
green = 'sur_refl_b04'
blue = 'sur_refl_b03'
qa = 'state_1km'
qa_mask = mod.select(qa)
Map.addLayer(mod, {'bands':[red, green, blue], 'min':0, 'max':5000}, 'Original')
Map.addLayer(qa_mask, {'min':0, 'max':reader.max}, 'QA')
mask = reader.decode_image(qa, mod)
Map.addLayer(mask.select(['cloud']), {'min':0, 'max':1}, 'Clouds')
from geetools import cloud_mask
state1km = cloud_mask.BITS_MODIS09GA
state1km
0.322739
0.863103
``` import numpy as np import numpy as np def sigmoid(x): return 1.0/(1.0 + np.exp(-x)) def sigmoid_prime(x): return sigmoid(x)*(1.0-sigmoid(x)) def tanh(x): return np.tanh(x) def tanh_prime(x): return 1.0 - x**2 class NeuralNetwork: def __init__(self, layers, activation='tanh'): if activation == 'sigmoid': self.activation = sigmoid self.activation_prime = sigmoid_prime elif activation == 'tanh': self.activation = tanh self.activation_prime = tanh_prime # Set weights self.weights = [] # layers = [2,2,1] # range of weight values (-1,1) # input and hidden layers - random((2+1, 2+1)) : 3 x 3 for i in range(1, len(layers) - 1): r = 2*np.random.random((layers[i-1] + 1, layers[i] + 1)) -1 self.weights.append(r) # output layer - random((2+1, 1)) : 3 x 1 r = 2*np.random.random( (layers[i] + 1, layers[i+1])) - 1 self.weights.append(r) def fit(self, X, y, learning_rate=0.2, epochs=100000): # Add column of ones to X # This is to add the bias unit to the input layer ones = np.atleast_2d(np.ones(X.shape[0])) X = np.concatenate((ones.T, X), axis=1) for k in range(epochs): if k % 10000 == 0: print ('epochs:', k) i = np.random.randint(X.shape[0]) a = [X[i]] for l in range(len(self.weights)): dot_value = np.dot(a[l], self.weights[l]) activation = self.activation(dot_value) a.append(activation) # output layer error = y[i] - a[-1] deltas = [error * self.activation_prime(a[-1])] # we need to begin at the second to last layer # (a layer before the output layer) for l in range(len(a) - 2, 0, -1): deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_prime(a[l])) # reverse # [level3(output)->level2(hidden)] => [level2(hidden)->level3(output)] deltas.reverse() # backpropagation # 1. Multiply its output delta and input activation # to get the gradient of the weight. # 2. Subtract a ratio (percentage) of the gradient from the weight. for i in range(len(self.weights)): layer = np.atleast_2d(a[i]) delta = np.atleast_2d(deltas[i]) self.weights[i] += learning_rate * layer.T.dot(delta) def predict(self, x): a = np.concatenate((np.ones(1).T, np.array(x)), axis=0) for l in range(0, len(self.weights)): a = self.activation(np.dot(a, self.weights[l])) return a if __name__ == '__main__': nn = NeuralNetwork([2,2,1]) X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) y_xor = np.array([0, 1, 1, 0]) y_xnor = np.array([1, 0, 0, 1]) nn.fit(X, y_xor) for e in X: print(e,nn.predict(e)) def xor_network(a, b, flag): #Instantiating NN nn_xor = NeuralNetwork([2,2,1]) nn_xnor = NeuralNetwork([2,2,1]) #a and b are binary numbers a1 = int(str(a)[0]) a2 = int(str(a)[1]) b1 = int(str(b)[0]) b2 = int(str(b)[1]) if (flag == 1): #Fitting nn_xor.fit(X, y_xor) #predicting y1 = abs(np.round(nn_xor.predict([a1, b1]))) y2 = abs(np.round(nn_xor.predict([a2, b2]))) else: #Fitting nn_xnor.fit(X, y_xnor) #predicting y1 = abs(np.round(nn_xnor.predict([a1, b1]))) y2 = abs(np.round(nn_xnor.predict([a2, b2]))) y = str(y1) + str(y2) print("\noutput:") print(y1, y2) a= '11' b= '01' xor_network(a, b, 0) ```
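As a quick sanity check that is not part of the original notebook, the same `NeuralNetwork` class can be trained on the XNOR targets defined above and evaluated over the full truth table, thresholding the tanh output at 0.5. It assumes the class and the arrays from the cell above.

```
import numpy as np

# Sanity-check sketch: train on XNOR and print the truth table.
nn_check = NeuralNetwork([2, 2, 1])
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_xnor = np.array([1, 0, 0, 1])
nn_check.fit(X, y_xnor)

for inputs, target in zip(X, y_xnor):
    raw = float(nn_check.predict(inputs))      # tanh output, roughly in (-1, 1)
    print(inputs, 'expected', target, 'predicted', int(raw > 0.5), '(raw %.3f)' % raw)
```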
github_jupyter
import numpy as np import numpy as np def sigmoid(x): return 1.0/(1.0 + np.exp(-x)) def sigmoid_prime(x): return sigmoid(x)*(1.0-sigmoid(x)) def tanh(x): return np.tanh(x) def tanh_prime(x): return 1.0 - x**2 class NeuralNetwork: def __init__(self, layers, activation='tanh'): if activation == 'sigmoid': self.activation = sigmoid self.activation_prime = sigmoid_prime elif activation == 'tanh': self.activation = tanh self.activation_prime = tanh_prime # Set weights self.weights = [] # layers = [2,2,1] # range of weight values (-1,1) # input and hidden layers - random((2+1, 2+1)) : 3 x 3 for i in range(1, len(layers) - 1): r = 2*np.random.random((layers[i-1] + 1, layers[i] + 1)) -1 self.weights.append(r) # output layer - random((2+1, 1)) : 3 x 1 r = 2*np.random.random( (layers[i] + 1, layers[i+1])) - 1 self.weights.append(r) def fit(self, X, y, learning_rate=0.2, epochs=100000): # Add column of ones to X # This is to add the bias unit to the input layer ones = np.atleast_2d(np.ones(X.shape[0])) X = np.concatenate((ones.T, X), axis=1) for k in range(epochs): if k % 10000 == 0: print ('epochs:', k) i = np.random.randint(X.shape[0]) a = [X[i]] for l in range(len(self.weights)): dot_value = np.dot(a[l], self.weights[l]) activation = self.activation(dot_value) a.append(activation) # output layer error = y[i] - a[-1] deltas = [error * self.activation_prime(a[-1])] # we need to begin at the second to last layer # (a layer before the output layer) for l in range(len(a) - 2, 0, -1): deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_prime(a[l])) # reverse # [level3(output)->level2(hidden)] => [level2(hidden)->level3(output)] deltas.reverse() # backpropagation # 1. Multiply its output delta and input activation # to get the gradient of the weight. # 2. Subtract a ratio (percentage) of the gradient from the weight. for i in range(len(self.weights)): layer = np.atleast_2d(a[i]) delta = np.atleast_2d(deltas[i]) self.weights[i] += learning_rate * layer.T.dot(delta) def predict(self, x): a = np.concatenate((np.ones(1).T, np.array(x)), axis=0) for l in range(0, len(self.weights)): a = self.activation(np.dot(a, self.weights[l])) return a if __name__ == '__main__': nn = NeuralNetwork([2,2,1]) X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) y_xor = np.array([0, 1, 1, 0]) y_xnor = np.array([1, 0, 0, 1]) nn.fit(X, y_xor) for e in X: print(e,nn.predict(e)) def xor_network(a, b, flag): #Instantiating NN nn_xor = NeuralNetwork([2,2,1]) nn_xnor = NeuralNetwork([2,2,1]) #a and b are binary numbers a1 = int(str(a)[0]) a2 = int(str(a)[1]) b1 = int(str(b)[0]) b2 = int(str(b)[1]) if (flag == 1): #Fitting nn_xor.fit(X, y_xor) #predicting y1 = abs(np.round(nn_xor.predict([a1, b1]))) y2 = abs(np.round(nn_xor.predict([a2, b2]))) else: #Fitting nn_xnor.fit(X, y_xnor) #predicting y1 = abs(np.round(nn_xnor.predict([a1, b1]))) y2 = abs(np.round(nn_xnor.predict([a2, b2]))) y = str(y1) + str(y2) print("\noutput:") print(y1, y2) a= '11' b= '01' xor_network(a, b, 0)
0.649467
0.721547
``` import numpy as np import skfuzzy as fuzz from skfuzzy import control as ctrl # Create Antecedent/Consequent objects PM25 = ctrl.Antecedent(np.arange(0, 950, 1), 'PM2.5') PM10 = ctrl.Antecedent(np.arange(0, 1000, 1), 'PM10') NO2 = ctrl.Antecedent(np.arange(0, 500, 1), 'NO2') NH3 = ctrl.Antecedent(np.arange(0, 2000, 1), 'NH3') CO = ctrl.Antecedent(np.arange(0, 180, 1), 'CO') SO2 = ctrl.Antecedent(np.arange(0, 1900, 1), 'SO2') O3 = ctrl.Antecedent(np.arange(0, 760, 1), 'O3') air_q_i = ctrl.Consequent(np.arange(0,500,1), 'air_q_i') #create membershiipfunction #CO membership function #all values are calculated based on air quality range in INDAI. CO['Good'] = fuzz.trapmf(CO.universe, [0, 0, 0.9, 1.1]) CO['Satisfactory'] = fuzz.trapmf(CO.universe, [0.9, 1.1, 1.9, 2.3]) CO['Moderate'] = fuzz.trapmf(CO.universe, [1.9, 2.3, 9.5, 10.3]) CO['Poor'] = fuzz.trapmf(CO.universe, [9.5, 10.3, 16.7, 19]) CO['Very Poor'] = fuzz.trapmf(CO.universe, [16.7, 19, 32, 36]) CO['Severe']= fuzz.trapmf(CO.universe, [32, 36, 180, 180]) CO.view() #PM10 membership function #all values are calculated based on air quality range in INDAI. PM10['Good'] = fuzz.trapmf(PM10.universe, [0, 0, 47, 52]) PM10['Satisfactory'] = fuzz.trapmf(PM10.universe, [47, 52, 95, 112]) PM10['Moderate'] = fuzz.trapmf(PM10.universe, [95, 112, 248, 256]) PM10['Poor'] = fuzz.trapmf(PM10.universe, [248, 256, 345, 355]) PM10['Very Poor'] = fuzz.trapmf(PM10.universe, [345,355,423,435]) PM10['Severe']= fuzz.trapmf(PM10.universe, [423,435,1000,1000]) PM10.view() #PM2.5 membership function #all values are calculated based on air quality range in INDAI. PM25['Good'] = fuzz.trapmf(PM25.universe, [0, 0, 28,32 ]) PM25['Satisfactory'] = fuzz.trapmf(PM25.universe, [28, 32 ,57, 85 ]) PM25['Moderate'] = fuzz.trapmf(PM25.universe, [ 57, 85, 87, 92]) PM25['Poor'] = fuzz.trapmf(PM25.universe, [ 87, 92, 119, 125]) PM25['Very Poor'] = fuzz.trapmf(PM25.universe, [ 119, 125, 246, 254]) PM25['Severe']= fuzz.trapmf(PM25.universe, [ 246, 254, 1000,1000]) PM25.view() #NO2 membership function #all values are calculated based on air quality range in INDAI. NO2['Good'] = fuzz.trapmf( NO2.universe, [ 0,0,38,42]) NO2['Satisfactory'] = fuzz.trapmf( NO2.universe, [38,42,78,85 ]) NO2['Moderate'] = fuzz.trapmf( NO2.universe, [78,85,178,183 ]) NO2['Poor'] = fuzz.trapmf( NO2.universe, [178,183,279,285]) NO2['Very Poor'] = fuzz.trapmf( NO2.universe, [279,285,397,402 ]) NO2['Severe']= fuzz.trapmf( NO2.universe, [397,402,500,500 ]) NO2.view() #O3 membership function #all values are calculated based on air quality range in INDAI. O3['Good'] = fuzz.trapmf( O3.universe, [0,0,48,52 ]) O3['Satisfactory'] = fuzz.trapmf( O3.universe, [ 48,52,98,103]) O3['Moderate'] = fuzz.trapmf( O3.universe, [ 98,103,167,170]) O3['Poor'] = fuzz.trapmf( O3.universe, [167,170,207,225 ]) O3['Very Poor'] = fuzz.trapmf( O3.universe, [ 207,225,728,752]) O3['Severe']= fuzz.trapmf( O3.universe, [728,752,760,760]) O3.view() #SO2 membership function #all values are calculated based on air quality range in INDAI. SO2['Severe']= fuzz.trapmf( SO2.universe, [ 0,0,38,42]) SO2['Good'] = fuzz.trapmf( SO2.universe, [ 38,42,78,82]) SO2['Satisfactory'] = fuzz.trapmf( SO2.universe, [ 78,82,280,395]) SO2['Moderate'] = fuzz.trapmf( SO2.universe, [280,395,775,840 ]) SO2['Poor'] = fuzz.trapmf( SO2.universe, [775,840,1560,1620 ]) SO2['Very Poor'] = fuzz.trapmf( SO2.universe, [1560,1620,1900,1900 ]) SO2.view() #NH3 membership function #all values are calculated based on air quality range in INDAI. 
NH3['Good'] = fuzz.trapmf( NH3.universe, [ 0,0,80,220]) NH3['Satisfactory'] = fuzz.trapmf( NH3.universe, [80,220,390,420 ]) NH3['Moderate'] = fuzz.trapmf( NH3.universe, [ 390,420,780,820]) NH3['Poor'] = fuzz.trapmf( NH3.universe, [782,820,1180,1220 ]) NH3['Very Poor'] = fuzz.trapmf( NH3.universe, [1180,1220,1780,1820]) NH3['Severe']= fuzz.trapmf( NH3.universe, [ 1780,1820,2000,2000]) NH3.view() #India standard Air quality index Range air_q_i['Good'] = fuzz.trapmf(air_q_i.universe, [0, 0, 40, 60]) air_q_i['Satisfactory'] = fuzz.trapmf(air_q_i.universe, [40, 60, 90, 110]) air_q_i['Moderate'] = fuzz.trapmf(air_q_i.universe, [90, 110, 140, 160]) air_q_i['Poor'] = fuzz.trapmf(air_q_i.universe, [140, 160, 190, 210]) air_q_i['Very Poor'] = fuzz.trapmf(air_q_i.universe, [190, 210, 270, 320]) air_q_i['Severe'] = fuzz.trapmf(air_q_i.universe, [270, 320, 500, 500]) air_q_i.view() #creation of rules derived from Desicion tree r0 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Good'] & NO2['Good'] ),consequent=(air_q_i['Good'])) r1 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Good'] & NO2['Satisfactory'] ),consequent=(air_q_i['Satisfactory'] )) r2 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Satisfactory']),consequent=(air_q_i['Satisfactory'])) r3 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Moderate']),consequent=(air_q_i['Moderate'])) r4 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Moderate'] & NO2['Satisfactory']), consequent=(air_q_i['Satisfactory'])) r5 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Moderate'] & NO2['Moderate']),consequent=(air_q_i['Moderate'])) r6 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Poor'] ),consequent=(air_q_i['Moderate'])) r7 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Very Poor'] ),consequent=(air_q_i['Poor'])) r8 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Satisfactory'] & NO2['Satisfactory'] & PM25['Moderate']),consequent=(air_q_i['Satisfactory'])) r9 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Satisfactory'] & NO2['Satisfactory'] & PM25['Poor']),consequent=(air_q_i['Moderate'])) r10 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Moderate'] & NO2['Satisfactory']),consequent=(air_q_i['Moderate'])) r11 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & NO2['Moderate']),consequent=(air_q_i['Moderate'])) r12 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NO2['Poor'] & NH3['Good']),consequent=(air_q_i['Moderate'])) r13 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NO2['Very Poor'] & NH3['Good']),consequent=(air_q_i['Poor'])) r14 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NH3['Moderate']),consequent=(air_q_i['Very Poor'])) r15 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Very Poor']),consequent=(air_q_i['Very Poor'])) r16 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Severe']),consequent=(air_q_i['Severe'])) r17 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Very Poor']),consequent=(air_q_i['Poor'])) r18 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Severe'] & NH3['Good']),consequent=(air_q_i['Very Poor'])) r19 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Severe'] & NH3['Satisfactory']),consequent=(air_q_i['Severe'])) r20 = ctrl.Rule(antecedent=(PM10['Very Poor'] & PM25['Severe']),consequent=(air_q_i['Severe'])) r21 = ctrl.Rule(antecedent=(PM10['Severe'] & PM25['Severe'] ),consequent=(air_q_i['Severe'])) r22 = ctrl.Rule(antecedent=(PM10['Very Poor'] & PM25['Very Poor']),consequent=(air_q_i['Very Poor'])) r23 = ctrl.Rule(antecedent=(PM10['Severe']),consequent=(air_q_i['Severe'])) r24 = ctrl.Rule(CO['Good'] , air_q_i['Good']) 
r25 = ctrl.Rule(CO['Poor'] , air_q_i['Poor']) r26 = ctrl.Rule(CO['Very Poor'] , air_q_i['Very Poor']) r27 = ctrl.Rule(CO['Satisfactory'] , air_q_i['Satisfactory']) r28 = ctrl.Rule(CO['Severe'] , air_q_i['Severe']) r29 = ctrl.Rule(CO['Moderate'] , air_q_i['Moderate']) r20.view() tx = [r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17,r18,r19,r20,r21,r22,r23,r24,r25,r26,r27,r28,r29] #sample: Mapping undefined rules to defined rules r1.view() r3.view() '''tipping_ctrl = ctrl.ControlSystem(xx) tipping = ctrl.ControlSystemSimulation(tipping_ctrl) ''' Air_ctrl = ctrl.ControlSystem(tx) Air_calc = ctrl.ControlSystemSimulation(Air_ctrl) Air_calc # Pass inputs to the ControlSystem using Antecedent labels with Pythonic API # Note: if you like passing many inputs all at once Air_calc.input['CO'] = 15 Air_calc.input['PM10'] = 80 Air_calc.input['NO2'] = 89 Air_calc.input['O3'] = 55 Air_calc.input['NH3'] = 88 Air_calc.input['PM2.5'] = 57 Air_calc.input['SO2'] = 900 # compute the input and gaain the output Air_calc.compute() print(Air_calc.output['air_q_i']) def Final_output(x): if x <= 50 and x >= 0: return "Good" elif x <= 101 and x > 50: return "Satisfactory" elif x <= 200 and x > 101: return "Moderate" elif x <= 300 and x > 200: return "Poor" elif x <= 400 and x > 300: return "Very Poor" elif x > 400: return "Severe" else: return "Unknown" print(Final_output(Air_calc.output['air_q_i'])) air_q_i.view(sim=Air_calc) ```
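The cell above wires the pollutant readings into `Air_calc` by hand. As a small convenience sketch (not in the original), the same control system can be wrapped in a helper that takes one set of readings and returns both the crisp index and its `Final_output` category; it only reuses `Air_ctrl`, `ctrl` and `Final_output` as defined above.

```
def air_quality_index(pm25, pm10, no2, nh3, co, so2, o3):
    """Run one set of pollutant readings through the fuzzy system defined above."""
    sim = ctrl.ControlSystemSimulation(Air_ctrl)   # fresh simulation per call
    sim.input['PM2.5'] = pm25
    sim.input['PM10'] = pm10
    sim.input['NO2'] = no2
    sim.input['NH3'] = nh3
    sim.input['CO'] = co
    sim.input['SO2'] = so2
    sim.input['O3'] = o3
    sim.compute()
    score = sim.output['air_q_i']
    return score, Final_output(score)

# Same readings as the hand-wired example above
print(air_quality_index(pm25=57, pm10=80, no2=89, nh3=88, co=15, so2=900, o3=55))
```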
github_jupyter
import numpy as np import skfuzzy as fuzz from skfuzzy import control as ctrl # Create Antecedent/Consequent objects PM25 = ctrl.Antecedent(np.arange(0, 950, 1), 'PM2.5') PM10 = ctrl.Antecedent(np.arange(0, 1000, 1), 'PM10') NO2 = ctrl.Antecedent(np.arange(0, 500, 1), 'NO2') NH3 = ctrl.Antecedent(np.arange(0, 2000, 1), 'NH3') CO = ctrl.Antecedent(np.arange(0, 180, 1), 'CO') SO2 = ctrl.Antecedent(np.arange(0, 1900, 1), 'SO2') O3 = ctrl.Antecedent(np.arange(0, 760, 1), 'O3') air_q_i = ctrl.Consequent(np.arange(0,500,1), 'air_q_i') #create membershiipfunction #CO membership function #all values are calculated based on air quality range in INDAI. CO['Good'] = fuzz.trapmf(CO.universe, [0, 0, 0.9, 1.1]) CO['Satisfactory'] = fuzz.trapmf(CO.universe, [0.9, 1.1, 1.9, 2.3]) CO['Moderate'] = fuzz.trapmf(CO.universe, [1.9, 2.3, 9.5, 10.3]) CO['Poor'] = fuzz.trapmf(CO.universe, [9.5, 10.3, 16.7, 19]) CO['Very Poor'] = fuzz.trapmf(CO.universe, [16.7, 19, 32, 36]) CO['Severe']= fuzz.trapmf(CO.universe, [32, 36, 180, 180]) CO.view() #PM10 membership function #all values are calculated based on air quality range in INDAI. PM10['Good'] = fuzz.trapmf(PM10.universe, [0, 0, 47, 52]) PM10['Satisfactory'] = fuzz.trapmf(PM10.universe, [47, 52, 95, 112]) PM10['Moderate'] = fuzz.trapmf(PM10.universe, [95, 112, 248, 256]) PM10['Poor'] = fuzz.trapmf(PM10.universe, [248, 256, 345, 355]) PM10['Very Poor'] = fuzz.trapmf(PM10.universe, [345,355,423,435]) PM10['Severe']= fuzz.trapmf(PM10.universe, [423,435,1000,1000]) PM10.view() #PM2.5 membership function #all values are calculated based on air quality range in INDAI. PM25['Good'] = fuzz.trapmf(PM25.universe, [0, 0, 28,32 ]) PM25['Satisfactory'] = fuzz.trapmf(PM25.universe, [28, 32 ,57, 85 ]) PM25['Moderate'] = fuzz.trapmf(PM25.universe, [ 57, 85, 87, 92]) PM25['Poor'] = fuzz.trapmf(PM25.universe, [ 87, 92, 119, 125]) PM25['Very Poor'] = fuzz.trapmf(PM25.universe, [ 119, 125, 246, 254]) PM25['Severe']= fuzz.trapmf(PM25.universe, [ 246, 254, 1000,1000]) PM25.view() #NO2 membership function #all values are calculated based on air quality range in INDAI. NO2['Good'] = fuzz.trapmf( NO2.universe, [ 0,0,38,42]) NO2['Satisfactory'] = fuzz.trapmf( NO2.universe, [38,42,78,85 ]) NO2['Moderate'] = fuzz.trapmf( NO2.universe, [78,85,178,183 ]) NO2['Poor'] = fuzz.trapmf( NO2.universe, [178,183,279,285]) NO2['Very Poor'] = fuzz.trapmf( NO2.universe, [279,285,397,402 ]) NO2['Severe']= fuzz.trapmf( NO2.universe, [397,402,500,500 ]) NO2.view() #O3 membership function #all values are calculated based on air quality range in INDAI. O3['Good'] = fuzz.trapmf( O3.universe, [0,0,48,52 ]) O3['Satisfactory'] = fuzz.trapmf( O3.universe, [ 48,52,98,103]) O3['Moderate'] = fuzz.trapmf( O3.universe, [ 98,103,167,170]) O3['Poor'] = fuzz.trapmf( O3.universe, [167,170,207,225 ]) O3['Very Poor'] = fuzz.trapmf( O3.universe, [ 207,225,728,752]) O3['Severe']= fuzz.trapmf( O3.universe, [728,752,760,760]) O3.view() #SO2 membership function #all values are calculated based on air quality range in INDAI. SO2['Severe']= fuzz.trapmf( SO2.universe, [ 0,0,38,42]) SO2['Good'] = fuzz.trapmf( SO2.universe, [ 38,42,78,82]) SO2['Satisfactory'] = fuzz.trapmf( SO2.universe, [ 78,82,280,395]) SO2['Moderate'] = fuzz.trapmf( SO2.universe, [280,395,775,840 ]) SO2['Poor'] = fuzz.trapmf( SO2.universe, [775,840,1560,1620 ]) SO2['Very Poor'] = fuzz.trapmf( SO2.universe, [1560,1620,1900,1900 ]) SO2.view() #NH3 membership function #all values are calculated based on air quality range in INDAI. 
NH3['Good'] = fuzz.trapmf( NH3.universe, [ 0,0,80,220]) NH3['Satisfactory'] = fuzz.trapmf( NH3.universe, [80,220,390,420 ]) NH3['Moderate'] = fuzz.trapmf( NH3.universe, [ 390,420,780,820]) NH3['Poor'] = fuzz.trapmf( NH3.universe, [782,820,1180,1220 ]) NH3['Very Poor'] = fuzz.trapmf( NH3.universe, [1180,1220,1780,1820]) NH3['Severe']= fuzz.trapmf( NH3.universe, [ 1780,1820,2000,2000]) NH3.view() #India standard Air quality index Range air_q_i['Good'] = fuzz.trapmf(air_q_i.universe, [0, 0, 40, 60]) air_q_i['Satisfactory'] = fuzz.trapmf(air_q_i.universe, [40, 60, 90, 110]) air_q_i['Moderate'] = fuzz.trapmf(air_q_i.universe, [90, 110, 140, 160]) air_q_i['Poor'] = fuzz.trapmf(air_q_i.universe, [140, 160, 190, 210]) air_q_i['Very Poor'] = fuzz.trapmf(air_q_i.universe, [190, 210, 270, 320]) air_q_i['Severe'] = fuzz.trapmf(air_q_i.universe, [270, 320, 500, 500]) air_q_i.view() #creation of rules derived from Desicion tree r0 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Good'] & NO2['Good'] ),consequent=(air_q_i['Good'])) r1 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Good'] & NO2['Satisfactory'] ),consequent=(air_q_i['Satisfactory'] )) r2 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Satisfactory']),consequent=(air_q_i['Satisfactory'])) r3 = ctrl.Rule(antecedent=(PM10['Good'] & O3['Moderate']),consequent=(air_q_i['Moderate'])) r4 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Moderate'] & NO2['Satisfactory']), consequent=(air_q_i['Satisfactory'])) r5 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Moderate'] & NO2['Moderate']),consequent=(air_q_i['Moderate'])) r6 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Poor'] ),consequent=(air_q_i['Moderate'])) r7 = ctrl.Rule(antecedent=(PM10['Good'] & PM25['Very Poor'] ),consequent=(air_q_i['Poor'])) r8 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Satisfactory'] & NO2['Satisfactory'] & PM25['Moderate']),consequent=(air_q_i['Satisfactory'])) r9 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Satisfactory'] & NO2['Satisfactory'] & PM25['Poor']),consequent=(air_q_i['Moderate'])) r10 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & SO2['Moderate'] & NO2['Satisfactory']),consequent=(air_q_i['Moderate'])) r11 = ctrl.Rule(antecedent=(PM10['Satisfactory'] & NO2['Moderate']),consequent=(air_q_i['Moderate'])) r12 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NO2['Poor'] & NH3['Good']),consequent=(air_q_i['Moderate'])) r13 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NO2['Very Poor'] & NH3['Good']),consequent=(air_q_i['Poor'])) r14 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Poor'] & NH3['Moderate']),consequent=(air_q_i['Very Poor'])) r15 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Very Poor']),consequent=(air_q_i['Very Poor'])) r16 = ctrl.Rule(antecedent=(PM10['Moderate'] & PM25['Severe']),consequent=(air_q_i['Severe'])) r17 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Very Poor']),consequent=(air_q_i['Poor'])) r18 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Severe'] & NH3['Good']),consequent=(air_q_i['Very Poor'])) r19 = ctrl.Rule(antecedent=(PM10['Poor'] & PM25['Severe'] & NH3['Satisfactory']),consequent=(air_q_i['Severe'])) r20 = ctrl.Rule(antecedent=(PM10['Very Poor'] & PM25['Severe']),consequent=(air_q_i['Severe'])) r21 = ctrl.Rule(antecedent=(PM10['Severe'] & PM25['Severe'] ),consequent=(air_q_i['Severe'])) r22 = ctrl.Rule(antecedent=(PM10['Very Poor'] & PM25['Very Poor']),consequent=(air_q_i['Very Poor'])) r23 = ctrl.Rule(antecedent=(PM10['Severe']),consequent=(air_q_i['Severe'])) r24 = ctrl.Rule(CO['Good'] , air_q_i['Good']) 
r25 = ctrl.Rule(CO['Poor'] , air_q_i['Poor']) r26 = ctrl.Rule(CO['Very Poor'] , air_q_i['Very Poor']) r27 = ctrl.Rule(CO['Satisfactory'] , air_q_i['Satisfactory']) r28 = ctrl.Rule(CO['Severe'] , air_q_i['Severe']) r29 = ctrl.Rule(CO['Moderate'] , air_q_i['Moderate']) r20.view() tx = [r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12,r13,r14,r15,r16,r17,r18,r19,r20,r21,r22,r23,r24,r25,r26,r27,r28,r29] #sample: Mapping undefined rules to defined rules r1.view() r3.view() '''tipping_ctrl = ctrl.ControlSystem(xx) tipping = ctrl.ControlSystemSimulation(tipping_ctrl) ''' Air_ctrl = ctrl.ControlSystem(tx) Air_calc = ctrl.ControlSystemSimulation(Air_ctrl) Air_calc # Pass inputs to the ControlSystem using Antecedent labels with Pythonic API # Note: if you like passing many inputs all at once Air_calc.input['CO'] = 15 Air_calc.input['PM10'] = 80 Air_calc.input['NO2'] = 89 Air_calc.input['O3'] = 55 Air_calc.input['NH3'] = 88 Air_calc.input['PM2.5'] = 57 Air_calc.input['SO2'] = 900 # compute the input and gaain the output Air_calc.compute() print(Air_calc.output['air_q_i']) def Final_output(x): if x <= 50 and x >= 0: return "Good" elif x <= 101 and x > 50: return "Satisfactory" elif x <= 200 and x > 101: return "Moderate" elif x <= 300 and x > 200: return "Poor" elif x <= 400 and x > 300: return "Very Poor" elif x > 400: return "Severe" else: return "Unknown" print(Final_output(Air_calc.output['air_q_i'])) air_q_i.view(sim=Air_calc)
0.120542
0.375621
# Using MXNet DALI plugin: using various readers ### Overview This example shows how different readers could be used to interact with MXNet. It shows how flexible DALI is. The following readers are used in this example: - MXNetReader - CaffeReader - FileReader - TFRecordReader For details on how to use them please see other [examples](..). Let us start from defining some global constants ``` # MXNet RecordIO db_folder = "/data/imagenet/train-480-val-256-recordio/" # Caffe LMDB lmdb_folder = "/data/imagenet/train-lmdb-256x256" # image dir with plain jpeg files image_dir = "../images" # TFRecord tfrecord = "/data/imagenet/train-val-tfrecord-480/train-00001-of-01024" tfrecord_idx = "idx_files/train-00001-of-01024.idx" tfrecord2idx_script = "tfrecord2idx" N = 8 # number of GPUs BATCH_SIZE = 128 # batch size per GPU ITERATIONS = 32 IMAGE_SIZE = 3 ``` Create idx file by calling `tfrecord2idx` script ``` from subprocess import call import os.path if not os.path.exists("idx_files"): os.mkdir("idx_files") if not os.path.isfile(tfrecord_idx): call([tfrecord2idx_script, tfrecord, tfrecord_idx]) ``` Let us define: - common part of pipeline, other pipelines will inherit it ``` from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types class CommonPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id): super(CommonPipeline, self).__init__(batch_size, num_threads, device_id) self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB) self.resize = ops.Resize(device = "gpu", image_type = types.RGB, interp_type = types.INTERP_LINEAR) self.cmn = ops.CropMirrorNormalize(device = "gpu", output_dtype = types.FLOAT, crop = (227, 227), image_type = types.RGB, mean = [128., 128., 128.], std = [1., 1., 1.]) self.uniform = ops.Uniform(range = (0.0, 1.0)) self.resize_rng = ops.Uniform(range = (256, 480)) def base_define_graph(self, inputs, labels): images = self.decode(inputs) images = self.resize(images, resize_shorter = self.resize_rng()) output = self.cmn(images, crop_pos_x = self.uniform(), crop_pos_y = self.uniform()) return (output, labels) ``` - MXNetReaderPipeline ``` from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types class MXNetReaderPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(MXNetReaderPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.MXNetReader(path = [db_folder+"train.rec"], index_path=[db_folder+"train.idx"], random_shuffle = True, shard_id = device_id, num_shards = num_gpus) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) ``` - CaffeReadPipeline ``` class CaffeReadPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(CaffeReadPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.CaffeReader(path = lmdb_folder, random_shuffle = True, shard_id = device_id, num_shards = num_gpus) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) ``` - FileReadPipeline ``` class FileReadPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(FileReadPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.FileReader(file_root = image_dir) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) ``` 
- TFRecordPipeline ``` import nvidia.dali.tfrecord as tfrec class TFRecordPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(TFRecordPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.TFRecordReader(path = tfrecord, index_path = tfrecord_idx, features = {"image/encoded" : tfrec.FixedLenFeature((), tfrec.string, ""), "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1) }) def define_graph(self): inputs = self.input(name="Reader") images = inputs["image/encoded"] labels = inputs["image/class/label"] return self.base_define_graph(images, labels) ``` Let us create pipelines and pass them to MXNet generic iterator ``` from __future__ import print_function import numpy as np from nvidia.dali.plugin.mxnet import DALIGenericIterator pipe_types = [[MXNetReaderPipeline, (0, 999)], [CaffeReadPipeline, (0, 999)], [FileReadPipeline, (0, 1)], [TFRecordPipeline, (1, 1000)]] for pipe_t in pipe_types: pipe_name, label_range = pipe_t print ("RUN: " + pipe_name.__name__) pipes = [pipe_name(batch_size=BATCH_SIZE, num_threads=2, device_id = device_id, num_gpus = N) for device_id in range(N)] pipes[0].build() dali_iter = DALIGenericIterator(pipes, [('data', DALIGenericIterator.DATA_TAG), ('label', DALIGenericIterator.LABEL_TAG)], pipes[0].epoch_size("Reader")) for i, data in enumerate(dali_iter): if i >= ITERATIONS: break # Testing correctness of labels for d in data: label = d.label[0].asnumpy() image = d.data[0] ## labels need to be integers assert(np.equal(np.mod(label, 1), 0).all()) ## labels need to be in range pipe_name[2] assert((label >= label_range[0]).all()) assert((label <= label_range[1]).all()) print("OK : " + pipe_name.__name__) ```
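The test loop above always stops after a fixed `ITERATIONS` count. If instead you want to walk exactly one epoch, the iteration count can be derived from the reader's epoch size and the global batch size; the sketch below uses only values already defined above and assumes `epoch_size("Reader")` reports the full dataset size (the same quantity that is passed to `DALIGenericIterator` as its size).

```
# Sketch: iterations needed to cover one epoch across all GPUs, assuming
# epoch_size("Reader") is the total number of samples and every iteration
# consumes BATCH_SIZE samples on each of the N pipelines.
epoch_size = pipes[0].epoch_size("Reader")
iters_per_epoch = epoch_size // (BATCH_SIZE * N)
print("samples: %d, iterations per epoch: %d" % (epoch_size, iters_per_epoch))
```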
github_jupyter
# MXNet RecordIO db_folder = "/data/imagenet/train-480-val-256-recordio/" # Caffe LMDB lmdb_folder = "/data/imagenet/train-lmdb-256x256" # image dir with plain jpeg files image_dir = "../images" # TFRecord tfrecord = "/data/imagenet/train-val-tfrecord-480/train-00001-of-01024" tfrecord_idx = "idx_files/train-00001-of-01024.idx" tfrecord2idx_script = "tfrecord2idx" N = 8 # number of GPUs BATCH_SIZE = 128 # batch size per GPU ITERATIONS = 32 IMAGE_SIZE = 3 from subprocess import call import os.path if not os.path.exists("idx_files"): os.mkdir("idx_files") if not os.path.isfile(tfrecord_idx): call([tfrecord2idx_script, tfrecord, tfrecord_idx]) from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types class CommonPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id): super(CommonPipeline, self).__init__(batch_size, num_threads, device_id) self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB) self.resize = ops.Resize(device = "gpu", image_type = types.RGB, interp_type = types.INTERP_LINEAR) self.cmn = ops.CropMirrorNormalize(device = "gpu", output_dtype = types.FLOAT, crop = (227, 227), image_type = types.RGB, mean = [128., 128., 128.], std = [1., 1., 1.]) self.uniform = ops.Uniform(range = (0.0, 1.0)) self.resize_rng = ops.Uniform(range = (256, 480)) def base_define_graph(self, inputs, labels): images = self.decode(inputs) images = self.resize(images, resize_shorter = self.resize_rng()) output = self.cmn(images, crop_pos_x = self.uniform(), crop_pos_y = self.uniform()) return (output, labels) from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types class MXNetReaderPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(MXNetReaderPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.MXNetReader(path = [db_folder+"train.rec"], index_path=[db_folder+"train.idx"], random_shuffle = True, shard_id = device_id, num_shards = num_gpus) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) class CaffeReadPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(CaffeReadPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.CaffeReader(path = lmdb_folder, random_shuffle = True, shard_id = device_id, num_shards = num_gpus) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) class FileReadPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(FileReadPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.FileReader(file_root = image_dir) def define_graph(self): images, labels = self.input(name="Reader") return self.base_define_graph(images, labels) import nvidia.dali.tfrecord as tfrec class TFRecordPipeline(CommonPipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(TFRecordPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.TFRecordReader(path = tfrecord, index_path = tfrecord_idx, features = {"image/encoded" : tfrec.FixedLenFeature((), tfrec.string, ""), "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1) }) def define_graph(self): inputs = self.input(name="Reader") images = inputs["image/encoded"] labels = inputs["image/class/label"] return self.base_define_graph(images, labels) from 
__future__ import print_function import numpy as np from nvidia.dali.plugin.mxnet import DALIGenericIterator pipe_types = [[MXNetReaderPipeline, (0, 999)], [CaffeReadPipeline, (0, 999)], [FileReadPipeline, (0, 1)], [TFRecordPipeline, (1, 1000)]] for pipe_t in pipe_types: pipe_name, label_range = pipe_t print ("RUN: " + pipe_name.__name__) pipes = [pipe_name(batch_size=BATCH_SIZE, num_threads=2, device_id = device_id, num_gpus = N) for device_id in range(N)] pipes[0].build() dali_iter = DALIGenericIterator(pipes, [('data', DALIGenericIterator.DATA_TAG), ('label', DALIGenericIterator.LABEL_TAG)], pipes[0].epoch_size("Reader")) for i, data in enumerate(dali_iter): if i >= ITERATIONS: break # Testing correctness of labels for d in data: label = d.label[0].asnumpy() image = d.data[0] ## labels need to be integers assert(np.equal(np.mod(label, 1), 0).all()) ## labels need to be in range pipe_name[2] assert((label >= label_range[0]).all()) assert((label <= label_range[1]).all()) print("OK : " + pipe_name.__name__)
0.436862
0.810741
# Modul 05 - Multi Qubits & Verschränkte Zustände Eine Schulungsserie der Meetup-Gruppe **[Quantum Computing meets Business - Rhineland](https://www.meetup.com/de-DE/Quantum-Computing-meets-Business-Rhineland/)** (Adapted from [qiskit-textbook](https://github.com/qiskit-community/qiskit-textbook)) ``` run ./00-Inhalt_Tools.ipynb ``` Einzelne Qubits sind interessant, aber einzeln bieten sie keinen rechnerischen Vorteil. Wir werden uns nun ansehen, wie wir mehrere Qubits darstellen und wie diese Qubits miteinander interagieren können. Wir haben gesehen, wie wir den Zustand eines Qubits mit einem 2D-Vektor darstellen können, jetzt werden wir sehen, wie wir den Zustand mehrerer Qubits darstellen können. ## Inhaltsverzeichnis 1. [Darstellung von Multi-Qubit Zuständen](#represent) 1.1 [Übungen](#ex1) 2. [Ein-Qubit Gatter auf Multi-Qubit Zustandsvektor](#single-qubit-gates) 2.1 [Übungen](#ex2) 3. [Multi-Qubit Gatter](#multi-qubit-gates) 3.1 [Das CNOT-Gatter](#cnot) 3.2 [Verschränkte Zustände](#entangled) 3.3 [Visualisierung verschränkter Zustände](#visual) 3.4 [Übungen](#ex3) ## 1. Darstellung von Multi-Qubit Zuständen <a id="represent"></a> Wir haben gesehen, dass ein einzelnes Bit zwei mögliche Zustände hat, und ein Qubit-Zustand hat zwei komplexe Amplituden. Analog dazu haben zwei Bits vier mögliche Zustände: `00` `01` `10` `11` Und um den Zustand von zwei Qubits zu beschreiben, braucht man vier komplexe Amplituden. Wir speichern diese Amplituden in einem 4D-Vektor wie folgt: $$ |a\rangle = a_{00}|00\rangle + a_{01}|01\rangle + a_{10}|10\rangle + a_{11}|11\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix} $$ Die Regeln der Messung funktionieren immer noch auf die gleiche Weise: $$ p(|00\rangle) = |\langle 00 | a \rangle |^2 = |a_{00}|^2$$ Und die gleichen Implikationen gelten, wie die Normalisierungsbedingung: $$ |a_{00}|^2 + |a_{01}|^2 + |a_{10}|^2 + |a_{11}|^2 = 1$$ Wenn wir zwei getrennte Qubits haben, können wir ihren gemeinsamen Zustand durch das Tensorprodukt beschreiben: $$ |a\rangle = \begin{bmatrix} a_0 \\ a_1 \end{bmatrix}, \quad |b\rangle = \begin{bmatrix} b_0 \\ b_1 \end{bmatrix} $$ $$ |ba\rangle = |b\rangle \otimes |a\rangle = \begin{bmatrix} b_0 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \\ b_1 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \end{bmatrix} = \begin{bmatrix} b_0 a_0 \\ b_0 a_1 \\ b_1 a_0 \\ b_1 a_1 \end{bmatrix} $$ Und nach den gleichen Regeln können wir das Tensorprodukt verwenden, um den kollektiven Zustand einer beliebigen Anzahl von Qubits zu beschreiben. Hier ist ein Beispiel mit drei Qubits: $$ |cba\rangle = \begin{bmatrix} c_0 b_0 a_0 \\ c_0 b_0 a_1 \\ c_0 b_1 a_0 \\ c_0 b_1 a_1 \\ c_1 b_0 a_0 \\ c_1 b_0 a_1 \\ c_1 b_1 a_0 \\ c_1 b_1 a_1 \\ \end{bmatrix} $$ Wenn wir $n$ Qubits haben, müssen wir $2^n$ komplexe Amplituden im Auge behalten. Wie wir sehen können, wachsen diese Vektoren exponentiell mit der Anzahl der Qubits. Dies ist der Grund, warum Quantencomputer mit einer großen Anzahl von Qubits so schwierig zu simulieren sind. Ein moderner Laptop kann leicht einen allgemeinen Quantenzustand von etwa 20 Qubits simulieren, aber die Simulation von 100 Qubits ist zu schwierig für die größten Supercomputer. 
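As a purely numerical illustration of the tensor-product rule described above (a sketch added here, not part of the original notebook, using only numpy), the exponential growth of the state vector can be reproduced with `numpy.kron`:

```
import numpy as np

plus = np.array([1, 1], dtype=complex) / np.sqrt(2)   # single-qubit |+> state

# |+++> = |+> (x) |+> (x) |+>  ->  2**3 = 8 amplitudes, each equal to 1/sqrt(8)
state = plus
for _ in range(2):
    state = np.kron(plus, state)

print(state.shape)                                    # (8,)
print(np.allclose(state, 1 / np.sqrt(8)))             # True
print(np.isclose(np.sum(np.abs(state)**2), 1.0))      # normalization condition holds
```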
Schauen wir uns eine Beispielschaltung an: ``` from qiskit import QuantumCircuit, Aer, execute from math import pi import numpy as np from qiskit.visualization import plot_histogram, plot_bloch_multivector qc = QuantumCircuit(3) # H-Gatter auf jedes Qubit anwenden: for qubit in range(3): qc.h(qubit) # See the circuit: qc.draw() ``` Jedes Qubit befindet sich im Zustand $|+\rangle$, wir sollten also den Vektor sehen: $$ |{+++}\rangle = \frac{1}{\sqrt{8}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ 1 \\ \end{bmatrix} $$ ``` # Lassen Sie uns das Ergebnis sehen svsim = Aer.get_backend('statevector_simulator') job = execute(qc, svsim) final_state = job.result().get_statevector() print(final_state) ``` Und wir haben unser erwartetes Ergebnis. ### 1.2 Kurzübungen: <a id="ex1"></a> 1. Schreiben Sie das Tensorprodukt der Qubits auf: a) $|0\rangle|1\rangle$ b) $|0\rangle|+\rangle$ c) $|+\rangle|1\rangle$ d) $|-\rangle|+\rangle$ 2. Schreiben Sie den Zustand: $|\psi\rangle = \tfrac{1}{\sqrt{2}}|00\rangle + \tfrac{i}{\sqrt{2}}|01\rangle $ als zwei einzelne Qubits ## 2. Ein-Qubit Gatter auf Multi-Qubit Zustandsvektor <a id="single-qubit-gates"></a> Wir haben gesehen, dass ein X-Gatter durch die Matrix dargestellt wird: $$ X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} $$ Und dass es so auf den Zustand $|0\rangle$ wirkt: $$ X|0\rangle = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}\begin{bmatrix} 1 \\ 0 \end{bmatrix} = \begin{bmatrix} 0 \\ 1\end{bmatrix} $$ aber es ist vielleicht nicht klar, wie ein X-Gatter auf ein Qubit in einem Multi-Qubit-Vektor wirken würde. Glücklicherweise ist die Regel recht einfach; so wie wir das Tensorprodukt verwendet haben, um Multi-Qubit-Zustandsvektoren zu berechnen, verwenden wir das Tensorprodukt, um Matrizen zu berechnen, die auf diese Zustandsvektoren wirken. Zum Beispiel in der folgenden Schaltung: ``` qc = QuantumCircuit(2) qc.h(0) qc.x(1) qc.draw() ``` wir können die gleichzeitigen Operationen (H & X) durch ihr Tensorprodukt darstellen: $$ X|q_1\rangle \otimes H|q_0\rangle = (X\otimes H)|q_1 q_0\rangle $$ Die Operation sieht folgendermaßen aus: $$ X\otimes H = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \otimes \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} = \frac{1}{\sqrt{2}} \begin{bmatrix} 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} & 1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} \\ 1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} & 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} \end{bmatrix} = \frac{1}{\sqrt{2}} \begin{bmatrix} 0 & 0 & 1 & 1 \\ 0 & 0 & 1 & -1 \\ 1 & 1 & 0 & 0 \\ 1 & -1 & 0 & 0 \\ \end{bmatrix} $$ Die wir dann auf unseren 4D-Zustandsvektor $|q_1 q_0\rangle$ anwenden können. Das kann ziemlich unübersichtlich werden, Sie werden also oft die klarere Schreibweise sehen: $$ X\otimes H = \begin{bmatrix} 0 & H \\ H & 0\\ \end{bmatrix} $$ Anstatt dies von Hand zu berechnen, können wir den `unitary_simulator` von Qiskit verwenden, um dies für uns zu tun. Der unitäre Simulator multipliziert alle Gatter in unserer Schaltung miteinander, um eine einzige unitäre Matrix zu kompilieren, die die gesamte Quantenschaltung ausführt: ``` usim = Aer.get_backend('unitary_simulator') job = execute(qc, usim) unitary = job.result().get_unitary() ``` und sehen Sie sich die Ergebnisse an: ``` print(unitary) ``` Wenn wir ein Gatter jeweils nur auf ein Qubit anwenden wollen (wie in der Schaltung unten), beschreiben wir dies durch ein Tensorprodukt mit der Identitätsmatrix, z. 
B.: $$ X \otimes I $$ ``` qc = QuantumCircuit(2) qc.x(1) qc.draw() # Simulate the unitary usim = Aer.get_backend('unitary_simulator') job = execute(qc, usim) unitary = job.result().get_unitary() print(unitary) ``` Wir können sehen, dass Qiskit das Tensorprodukt durchgeführt hat: $$ X \otimes I = \begin{bmatrix} 0 & I \\ I & 0\\ \end{bmatrix} = \begin{bmatrix} 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ \end{bmatrix} $$ ### 2.1 Kurzübungen: <a id="ex2"></a> 1. Berechnen Sie die Ein-Qubit-Unitary ($U$), die durch die Folge von Gattern erzeugt wird: $U = XZH$. Verwenden Sie den Unitary-Simulator von Qiskit, um Ihre Ergebnisse zu überprüfen. 2. Versuchen Sie, die Gatter in der obigen Schaltung zu verändern. Berechnen Sie deren Tensorprodukt und überprüfen Sie dann Ihre Antwort mit dem Unitary Simulator. **Hinweis:** Verschiedene Bücher, Softwares und Webseiten ordnen ihre Qubits unterschiedlich an. Das bedeutet, dass das Tensorprodukt der gleichen Schaltung sehr unterschiedlich aussehen kann. Versuchen Sie, dies zu berücksichtigen, wenn Sie andere Quellen zu Rate ziehen. ## 3. Multi-Qubit Gatter <a id="multi-qubit-gates"></a> Nachdem wir nun wissen, wie man den Zustand mehrerer Qubits darstellen kann, sind wir nun bereit zu lernen, wie Qubits miteinander interagieren. Ein wichtiges Zwei-Qubit-Gatter ist das CNOT-Gatter. ### 3.1 Das CNOT-Gatter <a id="cnot"></a> Sie haben dieses Gatter bereits in _[The Atoms of Computation](../ch-states/atoms-computation.html) kennengelernt._ Dieses Gatter ist ein bedingtes Gatter, das ein X-Gatter auf dem zweiten Qubit (Ziel) durchführt, wenn der Zustand des ersten Qubits (Steuerung) $|1\rangle$ ist. Das Gatter wird auf einer Schaltung wie dieser gezeichnet, mit `q0` als Steuerung und `q1` als Ziel: ``` qc = QuantumCircuit(2) # Apply CNOT qc.cx(0,1) # See the circuit: qc.draw() ``` Wenn unsere Qubits nicht in Überlagerung von $|0\rangle$ oder $|1\rangle$ sind (sich also wie klassische Bits verhalten), ist dieses Gatter sehr einfach und intuitiv zu verstehen. Wir können die klassische Wahrheitstabelle verwenden: | Input (t,c) | Output (t,c) | |:-----------:|:------------:| | 00 | 00 | | 01 | 11 | | 10 | 10 | | 11 | 01 | Und auf unseren 4D-Zustandsvektor wirkend, hat er eine der beiden Matrizen: $$ \text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ 0 & 1 & 0 & 0 \\ \end{bmatrix}, \quad \text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0 \\ \end{bmatrix} $$ je nachdem, welches Qubit die Steuerung und welches das Ziel ist. Verschiedene Bücher, Simulatoren und Papiere ordnen ihre Qubits unterschiedlich an. In unserem Fall entspricht die linke Matrix dem CNOT in der obigen Schaltung. Diese Matrix vertauscht die Amplituden von $|01\rangle$ und $|11\rangle$ in unserem Zustandsvektor: $$ |a\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix}, \quad \text{CNOT}|a\rangle = \begin{bmatrix} a_{00} \\ a_{11} \\ a_{10} \\ a_{01} \end{bmatrix} \begin{matrix} \\ \leftarrow \\ \\ \leftarrow \end{matrix} $$ Wir haben gesehen, wie sich dies auf klassische Zustände auswirkt, aber nun wollen wir sehen, wie es sich auf ein Qubit in Superposition auswirkt. 
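Before moving on to the superposition case, here is a small numpy check of the matrix action described above (an illustrative sketch, not part of the original notebook, using the same qubit ordering as the left-hand CNOT matrix):

```
import numpy as np

CNOT = np.array([[1, 0, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0],
                 [0, 1, 0, 0]], dtype=complex)

# generic state vector [a00, a01, a10, a11] (values chosen only for illustration)
a = np.array([0.1, 0.2, 0.3, 0.4], dtype=complex)
print(CNOT @ a)   # [0.1, 0.4, 0.3, 0.2] -> the amplitudes of |01> and |11> are swapped
```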
Wir werden ein Qubit in den Zustand $|+\rangle$ versetzen: ``` qc = QuantumCircuit(2) # Apply H-gate to the first: qc.h(0) qc.draw() # Let's see the result: svsim = Aer.get_backend('statevector_simulator') job = execute(qc, svsim) final_state = job.result().get_statevector() print(final_state) ``` Erwartungsgemäß ergibt dies den Zustand $|0\rangle \otimes |{+}\rangle = |0{+}\rangle$: $$ |0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |01\rangle) $$ Und schauen wir mal, was passiert, wenn wir das CNOT-Gatter anwenden: ``` qc = QuantumCircuit(2) # Apply H-gate to the first: qc.h(0) # Apply a CNOT: qc.cx(0,1) qc.draw() # Let's get the result: job = execute(qc, svsim) result = job.result() # Print the statevector neatly: final_state = result.get_statevector() print(final_state) ``` Wir sehen, wir haben den Zustand: $$ \text{CNOT}|0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle) $$ Dieser Zustand ist für uns sehr interessant, weil er _verschränkt_ ist, was uns nahtlos zum nächsten Abschnitt führt. ### 3.2 Verschränkte Zustände <a id="entangled"></a> Wir haben im vorigen Abschnitt gesehen, dass wir den Zustand erzeugen können: $$ \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle) $$ Dies ist ein sogenannter _Bell_-Zustand. Wir sehen, dass dieser Zustand eine 50%ige Wahrscheinlichkeit hat, im Zustand $|00\rangle$ gemessen zu werden, und eine 50%ige Chance, im Zustand $|11\rangle$ gemessen zu werden. Am interessantesten ist, dass er eine **0%ige** Chance hat, in den Zuständen $|01\rangle$ oder $|10\rangle$ gemessen zu werden. Wir können dies in Qiskit sehen: ``` plot_histogram(result.get_counts()) ``` Dieser kombinierte Zustand kann nicht als zwei separate Qubit-Zustände geschrieben werden, was interessante Implikationen hat. Obwohl sich unsere Qubits in Superposition befinden, wird uns die Messung des einen den Zustand des anderen verraten und dessen Superposition kollabieren lassen. Wenn wir zum Beispiel das obere Qubit messen und den Zustand $|1\rangle$ erhalten, ändert sich der kollektive Zustand unserer Qubits wie folgt: $$ \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle) \quad \xrightarrow[]{\text{measure}} \quad |11\rangle $$ Selbst wenn wir diese Qubits Lichtjahre entfernt trennen würden, kollabiert die Messung eines Qubits die Superposition und scheint eine unmittelbare Wirkung auf das andere zu haben. Dies ist die ['spukhafte Fernwirkung'](https://en.wikipedia.org/wiki/Quantum_nonlocality), die so viele Physiker im frühen 20. Jahrhundert aufregte. Es ist wichtig zu beachten, dass das Messergebnis zufällig ist und die Messstatistik des einen Qubits **nicht** durch irgendeine Operation am anderen Qubit beeinflusst wird. Aus diesem Grund gibt es **keine** Möglichkeit, gemeinsame Quantenzustände zur Kommunikation zu nutzen. Dies ist bekannt als das No-Communication-Theorem.[1] ### 3.3 Visualisierung verschränkter Zustände<a id="visual"></a> Wir haben gesehen, dass dieser Zustand nicht als zwei separate Qubit-Zustände geschrieben werden kann. Das bedeutet auch, dass wir Informationen verlieren, wenn wir versuchen, unseren Zustand auf separaten Bloch-Kugeln darzustellen: ``` plot_bloch_multivector(final_state) ``` Wenn man bedenkt, wie wir die Bloch-Sphäre in den früheren Kapiteln definiert haben, ist es vielleicht nicht klar, wie Qiskit die Bloch-Vektoren mit verschränkten Qubits überhaupt so berechnet. Im Ein-Qubit-Fall entspricht die Position des Bloch-Vektors entlang einer Achse sehr schön dem Erwartungswert der Messung in dieser Basis. 
Wenn wir dies als _die_ Regel für die Darstellung von Bloch-Vektoren nehmen, kommen wir zu der oben genannten Schlussfolgerung. Dies zeigt uns, dass es _keine_ Ein-Qubit-Messbasis gibt, für die eine bestimmte Messung garantiert ist. Dies steht im Gegensatz zu unseren Einzel-Qubit-Zuständen, bei denen wir immer eine Einzel-Qubit-Basis wählen konnten. Wenn wir die einzelnen Qubits auf diese Weise betrachten, verpassen wir den wichtigen Effekt der Korrelation zwischen den Qubits. Wir können nicht zwischen verschiedenen verschränkten Zuständen unterscheiden. Zum Beispiel, die zwei Zustände: $$\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle) \quad \text{and} \quad \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)$$ sehen beide auf diesen separaten Bloch-Kugeln gleich aus, obwohl es sich um sehr unterschiedliche Zustände mit unterschiedlichen Messergebnissen handelt. Wie könnten wir diesen Zustandsvektor sonst visualisieren? Dieser Zustandsvektor ist einfach eine Sammlung von vier Amplituden (komplexe Zahlen), und es gibt unendlich viele Möglichkeiten, diese auf ein Bild abzubilden. Eine solche Visualisierung ist die _Q-Kugel,_ hier wird jede Amplitude durch einen Blob auf der Oberfläche einer Kugel dargestellt. Die Größe des Kleckses ist proportional zum Betrag der Amplitude, und die Farbe ist proportional zur Phase der Amplitude. Die Amplituden für $|00\rangle$ und $|11\rangle$ sind gleich, und alle anderen Amplituden sind 0: ``` from qiskit.visualization import plot_state_qsphere plot_state_qsphere(final_state) ``` Hier können wir deutlich die Korrelation zwischen den Qubits sehen. Die Form der Q-Sphäre hat keine Bedeutung, sie ist einfach eine nette Art, unsere Blobs anzuordnen; die Anzahl der `0` im Zustand ist proportional zur Position des Zustands auf der Z-Achse, so dass wir hier sehen können, dass die Amplitude von $|00\rangle$ am oberen Pol der Kugel liegt und die Amplitude von $|11\rangle$ am unteren Pol der Kugel. ### 3.4 Übung: <a id="ex3"></a> 1. Erstellen Sie eine Quantenschaltung, die den Bell-Zustand erzeugt: $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$. Verwenden Sie den Zustandsvektorsimulator, um Ihr Ergebnis zu überprüfen. 2. Die Schaltung, die Sie in Frage 1 erstellt haben, transformiert den Zustand $|00\rangle$ in $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$. Berechnen Sie die Unitarität dieser Schaltung mithilfe des Simulators von Qiskit. Überprüfen Sie, ob diese Unitary tatsächlich die korrekte Transformation durchführt. 3. Überlegen Sie, wie Sie einen Zustandsvektor visuell darstellen können. Können Sie eine interessante Visualisierung entwerfen, aus der Sie den Betrag und die Phase der einzelnen Amplituden ablesen können? ## 4. Verweise [1] Asher Peres, Daniel R. Terno, _Quantum Information and Relativity Theory,_ 2004, https://arxiv.org/abs/quant-ph/0212023 ## Qiskit ``` import qiskit qiskit.__qiskit_version__ ``` ## Copyright Copyright 2021 Dual Software GmbH and SVA System Vertrieb Alexander GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
github_jupyter
run ./00-Inhalt_Tools.ipynb from qiskit import QuantumCircuit, Aer, execute from math import pi import numpy as np from qiskit.visualization import plot_histogram, plot_bloch_multivector qc = QuantumCircuit(3) # H-Gatter auf jedes Qubit anwenden: for qubit in range(3): qc.h(qubit) # See the circuit: qc.draw() # Lassen Sie uns das Ergebnis sehen svsim = Aer.get_backend('statevector_simulator') job = execute(qc, svsim) final_state = job.result().get_statevector() print(final_state) qc = QuantumCircuit(2) qc.h(0) qc.x(1) qc.draw() usim = Aer.get_backend('unitary_simulator') job = execute(qc, usim) unitary = job.result().get_unitary() print(unitary) qc = QuantumCircuit(2) qc.x(1) qc.draw() # Simulate the unitary usim = Aer.get_backend('unitary_simulator') job = execute(qc, usim) unitary = job.result().get_unitary() print(unitary) qc = QuantumCircuit(2) # Apply CNOT qc.cx(0,1) # See the circuit: qc.draw() qc = QuantumCircuit(2) # Apply H-gate to the first: qc.h(0) qc.draw() # Let's see the result: svsim = Aer.get_backend('statevector_simulator') job = execute(qc, svsim) final_state = job.result().get_statevector() print(final_state) qc = QuantumCircuit(2) # Apply H-gate to the first: qc.h(0) # Apply a CNOT: qc.cx(0,1) qc.draw() # Let's get the result: job = execute(qc, svsim) result = job.result() # Print the statevector neatly: final_state = result.get_statevector() print(final_state) plot_histogram(result.get_counts()) plot_bloch_multivector(final_state) from qiskit.visualization import plot_state_qsphere plot_state_qsphere(final_state) import qiskit qiskit.__qiskit_version__
0.763484
0.97024
# HACK4LEM Golem IV - Invisible Banking ### Import TensorFlow and other libraries ``` import matplotlib.pyplot as plt import numpy as np import os import PIL import tensorflow as tf import tempfile import pathlib from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from PIL import Image from pdf2image.exceptions import ( PDFInfoNotInstalledError, PDFPageCountError, PDFSyntaxError ) from pdf2image import convert_from_path def get_concat_v(im1, im2): dst = Image.new('RGB', (im1.width, im1.height + im2.height)) dst.paste(im1, (0, 0)) dst.paste(im2, (0, im1.height)) return dst data_dir = pathlib.Path('.') pdfs = list(data_dir.glob('pdfs/*.pdf')) for n, pdf in enumerate(pdfs): pages = convert_from_path(pdf, 500) for i, page in enumerate(pages): if (i == 0): if (len(pages) == 1): pname = 'pdf' + str(n) + '.png' page.save(pname, 'PNG') else : pname = 'tmp/__' + str(i) + '.png' page.save(pname, 'PNG') if (i == 1): im1 = Image.open('tmp/__0.png') im2 = Image.open('tmp/__1.png') if (len(pages) == 2): get_concat_v(im1, im2).save('pdf' + str(n) + '.png', 'PNG') else : get_concat_v(im1, im2).save('tmp/__0_1.png', 'PNG') if (i == 2): im3 = Image.open('tmp/__0_1.png') im4 = Image.open('tmp/__2.png') if (len(pages) == 3): get_concat_v(im3, im4).save('pdf' + str(n) + '.png', 'PNG') SIZE = 512 pdfs = list(data_dir.glob('*.png')) for pdf in pdfs: im = Image.open(pdf) if (im.width > SIZE or im.height > SIZE): im.thumbnail((SIZE, SIZE)) im.save(pdf, 'PNG') ``` # Explore the dataset ``` data_dir = pathlib.Path('.') image_count = len(list(data_dir.glob('out/*/*.png'))) print(image_count) pko_invoices = list(data_dir.glob('out/pko/*.png')) PIL.Image.open(str(pko_invoices[0])) plus_invoices = list(data_dir.glob('out/plus/*.png')) PIL.Image.open(str(plus_invoices[0])) plus_invoices = list(data_dir.glob('out/vw/*.png')) PIL.Image.open(str(plus_invoices[0])) ``` ### Load using keras.preprocessing Create a dataset ``` batch_size = 32 img_height = 224 img_width = 224 data_dir = pathlib.Path('out') train_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir, validation_split = 0.2, subset = "training", seed = 123, image_size = (img_height, img_width), batch_size = batch_size ) val_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir, validation_split = 0.2, subset = "validation", seed = 123, image_size = (img_height, img_width), batch_size = batch_size ) class_names = train_ds.class_names print(class_names) ``` ### Visualize the data ``` plt.figure(figsize=(10, 10)) for images, labels in train_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) plt.title(class_names[labels[i]]) plt.axis("off") for image_batch, labels_batch in train_ds: print(image_batch.shape) print(labels_batch.shape) break ``` ### Configure the dataset for performance ``` AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) normalization_layer = layers.experimental.preprocessing.Rescaling(1./255) normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y)) image_batch, labels_batch = next(iter(normalized_ds)) first_image = image_batch[0] # Notice the pixels values are now in `[0,1]`. 
print(np.min(first_image), np.max(first_image)) ``` ### Create the model ``` num_classes = 3 model = Sequential([ layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)), layers.Conv2D(16, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(32, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(64, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dense(num_classes) ]) ``` ### Compile the model ``` model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) ``` ### Model summary ``` model.summary() ``` ### Train the model ``` epochs = 40 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs ) ``` ### Visualize training results ``` acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() ``` ### Predict on new data ``` loss, accuracy = model.evaluate(val_ds) print('Test accuracy :', accuracy) test_image_path = pathlib.Path('test_pko_pdf42.png') #test_image_path = pathlib.Path('test_plus_pdf38.png') #test_image_path = pathlib.Path('test_vw_pdf23.png') img = keras.preprocessing.image.load_img( test_image_path, target_size=(img_height, img_width) ) img_array = keras.preprocessing.image.img_to_array(img) img_array = tf.expand_dims(img_array, 0) # Create a batch predictions = model.predict(img_array) score = tf.nn.softmax(predictions[0]) print( "This invoice belongs to {} with a {:.2f} percent confidence." .format(class_names[np.argmax(score)], 100 * np.max(score)) ) PIL.Image.open(str(test_image_path)) !mkdir -p saved_model model.save('saved_model/lem_model.h5') # Recreate the exact same model, including its weights and the optimizer new_model = tf.keras.models.load_model('saved_model/lem_model.h5') #new_model.summary() test_image_path = pathlib.Path('test_plus_pdf38.png') img = keras.preprocessing.image.load_img( test_image_path, target_size = (img_height, img_width) ) img_array = keras.preprocessing.image.img_to_array(img) img_array = tf.expand_dims(img_array, 0) predictions = model.predict(img_array) score = tf.nn.softmax(predictions[0]) print( "This invoice belongs to {} with a {:.2f} percent confidence." .format(class_names[np.argmax(score)], 100 * np.max(score)) ) PIL.Image.open(str(test_image_path)) ```
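Since the prediction cell is essentially repeated twice above, the same logic can be wrapped in a small helper. This is a sketch that only reuses APIs already imported in this notebook; `classify_invoice` and its arguments are illustrative names rather than part of the original code.

```
def classify_invoice(model, image_path, class_names, img_height=224, img_width=224):
    """Return (predicted_class, confidence_in_percent) for a single invoice image."""
    img = keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))
    img_array = keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)                  # batch of one
    score = tf.nn.softmax(model.predict(img_array)[0])
    return class_names[np.argmax(score)], 100 * np.max(score)

label, confidence = classify_invoice(new_model, 'test_vw_pdf23.png', class_names)
print("This invoice belongs to {} with a {:.2f} percent confidence.".format(label, confidence))
```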
github_jupyter
import matplotlib.pyplot as plt import numpy as np import os import PIL import tensorflow as tf import tempfile import pathlib from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from PIL import Image from pdf2image.exceptions import ( PDFInfoNotInstalledError, PDFPageCountError, PDFSyntaxError ) from pdf2image import convert_from_path def get_concat_v(im1, im2): dst = Image.new('RGB', (im1.width, im1.height + im2.height)) dst.paste(im1, (0, 0)) dst.paste(im2, (0, im1.height)) return dst data_dir = pathlib.Path('.') pdfs = list(data_dir.glob('pdfs/*.pdf')) for n, pdf in enumerate(pdfs): pages = convert_from_path(pdf, 500) for i, page in enumerate(pages): if (i == 0): if (len(pages) == 1): pname = 'pdf' + str(n) + '.png' page.save(pname, 'PNG') else : pname = 'tmp/__' + str(i) + '.png' page.save(pname, 'PNG') if (i == 1): im1 = Image.open('tmp/__0.png') im2 = Image.open('tmp/__1.png') if (len(pages) == 2): get_concat_v(im1, im2).save('pdf' + str(n) + '.png', 'PNG') else : get_concat_v(im1, im2).save('tmp/__0_1.png', 'PNG') if (i == 2): im3 = Image.open('tmp/__0_1.png') im4 = Image.open('tmp/__2.png') if (len(pages) == 3): get_concat_v(im3, im4).save('pdf' + str(n) + '.png', 'PNG') SIZE = 512 pdfs = list(data_dir.glob('*.png')) for pdf in pdfs: im = Image.open(pdf) if (im.width > SIZE or im.height > SIZE): im.thumbnail((SIZE, SIZE)) im.save(pdf, 'PNG') data_dir = pathlib.Path('.') image_count = len(list(data_dir.glob('out/*/*.png'))) print(image_count) pko_invoices = list(data_dir.glob('out/pko/*.png')) PIL.Image.open(str(pko_invoices[0])) plus_invoices = list(data_dir.glob('out/plus/*.png')) PIL.Image.open(str(plus_invoices[0])) plus_invoices = list(data_dir.glob('out/vw/*.png')) PIL.Image.open(str(plus_invoices[0])) batch_size = 32 img_height = 224 img_width = 224 data_dir = pathlib.Path('out') train_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir, validation_split = 0.2, subset = "training", seed = 123, image_size = (img_height, img_width), batch_size = batch_size ) val_ds = tf.keras.preprocessing.image_dataset_from_directory( data_dir, validation_split = 0.2, subset = "validation", seed = 123, image_size = (img_height, img_width), batch_size = batch_size ) class_names = train_ds.class_names print(class_names) plt.figure(figsize=(10, 10)) for images, labels in train_ds.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images[i].numpy().astype("uint8")) plt.title(class_names[labels[i]]) plt.axis("off") for image_batch, labels_batch in train_ds: print(image_batch.shape) print(labels_batch.shape) break AUTOTUNE = tf.data.AUTOTUNE train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) normalization_layer = layers.experimental.preprocessing.Rescaling(1./255) normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y)) image_batch, labels_batch = next(iter(normalized_ds)) first_image = image_batch[0] # Notice the pixels values are now in `[0,1]`. 
print(np.min(first_image), np.max(first_image)) num_classes = 3 model = Sequential([ layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)), layers.Conv2D(16, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(32, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Conv2D(64, 3, padding='same', activation='relu'), layers.MaxPooling2D(), layers.Flatten(), layers.Dense(128, activation='relu'), layers.Dense(num_classes) ]) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.summary() epochs = 40 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs ) acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(8, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, acc, label='Training Accuracy') plt.plot(epochs_range, val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.title('Training and Validation Accuracy') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() loss, accuracy = model.evaluate(val_ds) print('Test accuracy :', accuracy) test_image_path = pathlib.Path('test_pko_pdf42.png') #test_image_path = pathlib.Path('test_plus_pdf38.png') #test_image_path = pathlib.Path('test_vw_pdf23.png') img = keras.preprocessing.image.load_img( test_image_path, target_size=(img_height, img_width) ) img_array = keras.preprocessing.image.img_to_array(img) img_array = tf.expand_dims(img_array, 0) # Create a batch predictions = model.predict(img_array) score = tf.nn.softmax(predictions[0]) print( "This invoice belongs to {} with a {:.2f} percent confidence." .format(class_names[np.argmax(score)], 100 * np.max(score)) ) PIL.Image.open(str(test_image_path)) !mkdir -p saved_model model.save('saved_model/lem_model.h5') # Recreate the exact same model, including its weights and the optimizer new_model = tf.keras.models.load_model('saved_model/lem_model.h5') #new_model.summary() test_image_path = pathlib.Path('test_plus_pdf38.png') img = keras.preprocessing.image.load_img( test_image_path, target_size = (img_height, img_width) ) img_array = keras.preprocessing.image.img_to_array(img) img_array = tf.expand_dims(img_array, 0) predictions = model.predict(img_array) score = tf.nn.softmax(predictions[0]) print( "This invoice belongs to {} with a {:.2f} percent confidence." .format(class_names[np.argmax(score)], 100 * np.max(score)) ) PIL.Image.open(str(test_image_path))
0.531939
0.727153
# Text-to-Speech

# Installing and Importing the Dependencies

```
# run the following from the terminal
#sudo apt-get install sox libsndfile1 ffmpeg

# Installing the Dependencies
!pip install wget unidecode
!pip3.8 install pytorch_lightning

# Upgrading pip and installing the NVIDIA NeMo toolkit
!pip3.8 install --upgrade pip
!pip3.8 install torch

BRANCH = 'r1.0.0rc1'
!python3.8 -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[tts]

# Importing the libraries
import os
import pandas as pd
import re
import librosa
from datasets import Dataset
```

# Loading the Dataset

```
# defining the root directory
data_directory = "../Badaga_Corpus-v.0.1.0/"
tagged_file = "Badaga-v0.1.0.xlsx"

# loading the data
tagged_file_path = os.path.join(data_directory, tagged_file)
data_frame = pd.read_excel(tagged_file_path)

# dropping the missing values
data_frame.dropna(inplace=True)

gender = "F" # "M", "F"

# loading the audio files
data_frame["audio_file_name"] = data_frame["audio_file_name"].apply(lambda x: os.path.join(data_directory, "clips", x))
data_frame = data_frame[data_frame["gender"]==gender]

# splitting the data into train and test using the split_label from the transcription
train_df = data_frame[data_frame["split_label"]!="test"]
test_df = data_frame[data_frame["split_label"]=="test"]

# printing the data
train_df

# creating the dictionary for both train and test and storing it as a json file
import json

audio_paths = list(train_df["audio_file_name"])
durations = list(train_df["duration"])
labels = list(train_df["translterated_script"])
my_dict = dict()
i = 0
with open("files/tts_train.json", "w") as outfile:
    for path, dur, label in zip(audio_paths, durations, labels):
        my_dict = {"audio_filepath": path, "duration": dur, "text": label}
        json_object = json.dumps(my_dict)
        outfile.write(json_object + "\n")

audio_paths = list(test_df["audio_file_name"])
durations = list(test_df["duration"])
labels = list(test_df["translterated_script"])
my_dict = dict()
i = 0
with open("files/tts_test.json", "w") as outfile:
    for path, dur, label in zip(audio_paths, durations, labels):
        my_dict = {"audio_filepath": path, "duration": dur, "text": label}
        json_object = json.dumps(my_dict)
        outfile.write(json_object + "\n")
```

# Loading the Model

```
# loading the Tacotron 2 model and training
directory = "tactron2-rbg-badaga-tts-"+gender.lower()

if os.path.exists(directory):
    pass
else:
    os.makedirs(directory)

!python3.8 tacotron2.py sample_rate=16000 train_dataset=files/tts_train.json validation_datasets=files/tts_test.json exp_manager.exp_dir=$directory trainer.max_epochs=20 trainer.accelerator=null trainer.check_val_every_n_epoch=1
```
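Because NeMo expects one JSON object per line with exactly the keys written above, a quick validation pass over the manifests can catch path or duration problems before the (long) Tacotron 2 training run. This is an illustrative sketch, not part of the original notebook; the field names simply mirror the ones used when the files were written.

```
import json
import os

def check_manifest(path):
    """Lightweight sanity check of a NeMo-style manifest file."""
    n, total_duration = 0, 0.0
    with open(path) as f:
        for line in f:
            entry = json.loads(line)
            assert os.path.isfile(entry["audio_filepath"]), entry["audio_filepath"]
            assert entry["duration"] > 0
            assert isinstance(entry["text"], str) and entry["text"].strip()
            n += 1
            total_duration += entry["duration"]
    print(f"{path}: {n} utterances, {total_duration / 3600:.2f} hours of audio")

check_manifest("files/tts_train.json")
check_manifest("files/tts_test.json")
```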
github_jupyter
# run the following from the terminal #sudo apt-get install sox libsndfile1 ffmpeg # Installing the Dependencies !pip install wget unidecode !pip3.8 install pytorch_lightning # Upgrading pip and cloning the NVIDIA !pip3.8 install --upgrade pip !pip3.8 install torch BRANCH = 'r1.0.0rc1' !python3.8 -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[tts] # Imorting the libraries import os import pandas as pd import re import librosa from datasets import Dataset # defining the root directory data_directory = "../Badaga_Corpus-v.0.1.0/" tagged_file = "Badaga-v0.1.0.xlsx" # loading the data tagged_file_path = os.path.join(data_directory, tagged_file) data_frame = pd.read_excel(tagged_file_path) # droping the missing values data_frame.dropna(inplace=True) gender = "F" # "M", "F" # loading the audio files data_frame["audio_file_name"] = data_frame["audio_file_name"].apply(lambda x: os.path.join(data_directory, "clips", x)) data_frame = data_frame[data_frame["gender"]==gender] # splitting the data to train and test using the aplit_index from transcription train_df = data_frame[data_frame["split_label"]!="test"] test_df = data_frame[data_frame["split_label"]=="test"] # printing the data train_df # creating the dictionary for both train and test and storing it as a json file import json audio_paths = list(train_df["audio_file_name"]) durations = list(train_df["duration"]) labels = list(train_df["translterated_script"]) my_dict = dict() i = 0 with open("files/tts_train.json", "w") as outfile: for path, dur, label in zip(audio_paths, durations, labels): my_dict = {"audio_filepath": path, "duration": dur, "text": label} json_object = json.dumps(my_dict) outfile.write(json_object + "\n") audio_paths = list(test_df["audio_file_name"]) durations = list(test_df["duration"]) labels = list(test_df["translterated_script"]) my_dict = dict() i = 0 with open("files/tts_test.json", "w") as outfile: for path, dur, label in zip(audio_paths, durations, labels): my_dict = {"audio_filepath": path, "duration": dur, "text": label} json_object = json.dumps(my_dict) outfile.write(json_object + "\n") # loading the Tactron model and training directory = "tactron2-rbg-badaga-tts-"+gender.lower() if os.path.exists(directory): pass else: os.makedirs(directory) !python3.8 tacotron2.py sample_rate=16000 train_dataset=files/tts_train.json validation_datasets=files/tts_test.json exp_manager.exp_dir=$directory trainer.max_epochs=20 trainer.accelerator=null trainer.check_val_every_n_epoch=1
0.336222
0.630159
### 1.
<h3>Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales, TV, radio, and newspaper, rather than in terms of the coefficients of the linear model.</h3>

Null Hypothesis - H0 - indicates that there is no relationship between X (predictor) and Y (response)

P value - A smaller P value indicates that there exists an association between the predictor and response.

In the given table:

| medium | P value |
| --- | --- |
| TV | <0.0001 |
| radio | <0.0001 |
| newspaper | 0.8599 |

Radio and TV have very low p-values, which implies there exists a strong relationship between TV, radio and Sales. Newspaper, on the other hand, has a high P value, so we cannot reject the null hypothesis for newspaper (no evidence of a relationship with Sales); newspaper can therefore be dropped from the model.

### 2.
<h3>Carefully explain the differences between the KNN classifier and KNN regression methods.</h3>

Although both are similar methods, they have some differences:

KNN Classifier - The end goal is to classify a point (assign a class to a point). The formula uses probability to assign a class to a point.

KNN Regression - The end goal is to predict a value for a new point. The formula assigns a value based on the K nearest values.

### 3.
<h3>Suppose we have a data set with five predictors, X1 = GPA, X2 = IQ, X3 = Gender (1 for Female and 0 for Male), X4 = Interaction between GPA and IQ, and X5 = Interaction between GPA and Gender. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get β̂0 = 50, β̂1 = 20, β̂2 = 0.07, β̂3 = 35, β̂4 = 0.01, β̂5 = −10.

(a) Which answer is correct, and why?
i. For a fixed value of IQ and GPA, males earn more on average than females.
ii. For a fixed value of IQ and GPA, females earn more on average than males.
iii. For a fixed value of IQ and GPA, males earn more on average than females provided that the GPA is high enough.
iv. For a fixed value of IQ and GPA, females earn more on average than males provided that the GPA is high enough.

(b) Predict the salary of a female with IQ of 110 and a GPA of 4.0.

(c) True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer.</h3>

y = 50 + 20 x1 + 0.07 x2 + 35 x3 + 0.01 x1x2 - 10 x1x3

a) Given x1 and x2 are constant, y only depends on gender (0 male, 1 female)

male - y = 50 + 20 x1 + 0.07 x2 + 0.01 x1x2

female - y = 50 + 20 x1 + 0.07 x2 + 35 + 0.01 x1x2 - 10 x1x3 = 85 + 20 x1 + 0.07 x2 + 0.01 x1x2 - 10 x1

Thus, for a sufficiently high GPA, males will earn more on average (the male equation has no negative GPA term) => iii

b) y = 85 + 20(4.0) + 0.07(110) + 0.01(4.0)(110) - 10(4.0) = 85 + 80 + 7.7 + 4.4 - 40 = 137.1, i.e. about $137,100

c) False. Significance is determined by the p-value of the interaction term, not by the size of its coefficient.

### 4.
<h3> I collect a set of data (n = 100 observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. Y = β0 + β1 X + β2 X² + β3 X³ + ε.

(a) Suppose that the true relationship between X and Y is linear, i.e. Y = β0 + β1 X + ε. Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer.

(b) Answer (a) using test rather than training RSS.

(c) Suppose that the true relationship between X and Y is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer.

(d) Answer (c) using test rather than training RSS. </h3>

a) The training RSS for the cubic (polynomial) regression will be lower, as the more flexible curve will try to fit as many training points as possible.

b) As the actual relationship is linear, the polynomial model will have overfitted to the training data. Hence, on test data the linear model is expected to have the lower RSS.

c) The training RSS for the polynomial model will be lower than for the linear one, since the polynomial model can fit the training points more closely.

d) Since we do not know 'how far from linear' the true relationship is, there is not enough information to tell which test RSS will be lower. If the true relationship is close to linear, the linear model will have the lower test RSS, and vice versa.

### 5. Find aᵢ

<img src="5.jpg" width=300 height=100 >

### 6.
<h3>Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point (x̄, ȳ).</h3>

<img src="6.jpg">

### 7.
<h3>It is claimed in the text that in the case of simple linear regression of Y onto X, the R² statistic (3.17) is equal to the square of the correlation between X and Y (3.18). Prove that this is the case. For simplicity, you may assume that x̄ = ȳ = 0.</h3>

<img src="7.1.jpg" width=100 height=100>
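As a quick numerical illustration of the Question 4 reasoning (a sketch added here; the data-generating numbers are arbitrary assumptions), one can fit a degree-1 and a degree-3 polynomial to data whose true relationship is linear and compare training and test RSS:

```
import numpy as np

rng = np.random.default_rng(0)
x_train, x_test = rng.uniform(-3, 3, 100), rng.uniform(-3, 3, 100)
f = lambda x: 2.0 + 3.0 * x                      # true relationship is linear
y_train = f(x_train) + rng.normal(0, 1, 100)
y_test = f(x_test) + rng.normal(0, 1, 100)

for degree in (1, 3):
    coeffs = np.polyfit(x_train, y_train, degree)
    rss_train = np.sum((y_train - np.polyval(coeffs, x_train)) ** 2)
    rss_test = np.sum((y_test - np.polyval(coeffs, x_test)) ** 2)
    print(f"degree {degree}: train RSS = {rss_train:.1f}, test RSS = {rss_test:.1f}")

# Typically the cubic fit gives the lower *training* RSS (extra flexibility),
# while its *test* RSS is equal to or worse than that of the linear fit.
```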
github_jupyter
### 1. <h3>Describe the null hypotheses to which the p-values given in Table 3.4 correspond. Explain what conclusions you can draw based on these p-values. Your explanation should be phrased in terms of sales , TV , radio , and newspaper , rather than in terms of the coefficients of the linear model.</h3> Null Hypothesis - H0 - indicates that there is no relationship between X (predictor) and Y (response) P value - A smaller P value indicates that there exists an association between the predictor and response. In the given table medium P value TV <0.0001 radio <0.0001 newspaper 0.8599 Radio and TV have very low p-value ,which implies there exists strong relationship between TV,radio and Sales . Newspaper on the other hand has a higher P value which implies Null Hypothesis, thus it can be rejected from the model. ### 2. <h3>Carefully explain the differences between the KNN classifier and KNN regression methods.</h3> Although both are similar methods , they have some differences KNN Classifier - The end goal is to classify a point (assign a class to a point) . The Formula uses probablity to assign a class to a point KNN Regression - The end goal is to predict value for a new point . The formula assigns a value based on K nearest values. ### 3. <h3>Suppose we have a data set with five predictors, X 1 = GPA, X 2 = IQ, X 3 = Gender (1 for Female and 0 for Male), X 4 = Interaction between GPA and IQ, and X 5 = Interaction between GPA and Gender. The response is starting salary after graduation (in thousands of dollars). Suppose we use least squares to fit the model, and get β̂ 0 = 50, β̂ 1 = 20, β̂ 2 = 0.07, β̂ 3 = 35, β̂ 4 = 0.01, β̂ 5 = −10. (a) Which answer is correct, and why? i. For a fixed value of IQ and GPA, males earn more on average than females. ii. For a fixed value of IQ and GPA, females earn more on average than males. iii. For a fixed value of IQ and GPA, males earn more on average than females provided that the GPA is high enough. iv. For a fixed value of IQ and GPA, females earn more on average than males provided that the GPA is high enough. (b) Predict the salary of a female with IQ of 110 and a GPA of 4.0. (c) True or false: Since the coefficient for the GPA/IQ interaction term is very small, there is very little evidence of an interaction effect. Justify your answer.</h3> y = 50 + 20 x1 + 0.07 x2 + 35 x3 + 0.01 x1x2 - 10 x1x3 a) Given x1 and x2 are constant , y only depends on gender ( 0 male , 1 female ) male - y = 50 + 20 x1 + 0.07 x2 + 0.01 x1x2 female - y = 50 + 20 x1 + 0.07 x2 + 35 + 0.01 x1x2 - 10 x1x3 = 85 + 20 x1 + 0.07 x2 + 0.01 x1x2 - 10 x1 Thus after a suffiently high GPA males will earn more ( no negative term ) => iii b) 137.1 c) False , significance is determined by p value and not by coefficient ### 4. <h3> I collect a set of data (n = 100 observations) containing a single predictor and a quantitative response. I then fit a linear regression model to the data, as well as a separate cubic regression, i.e. Y = β 0 + β 1 X + β 2 X 2 + β 3 X 3 + . (a) Suppose that the true relationship between X and Y is linear, i.e. Y = β 0 + β 1 X + . Consider the training residual sum of squares (RSS) for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer (b) Answer (a) using test rather than training RSS. 
(c) Suppose that the true relationship between X and Y is not linear, but we don’t know how far it is from linear. Consider the training RSS for the linear regression, and also the training RSS for the cubic regression. Would we expect one to be lower than the other, would we expect them to be the same, or is there not enough information to tell? Justify your answer. (d) Answer (c) using test rather than training RSS. </h3> a) RSS for the Polynomial Regression will be lesser in training data , as the curve will try and fir as many points as possible b) As the actual relationship is linear , the polynomial model would have overfitted to the training data. Hence in case of test data , Linear model will have lesser RSS c) RSS for polynomial model will be lesser than the Linear one as the polynomial model can fit the points better in trraining data. d) Due to not knowing ' How far from linear ' we cannot make assumptions about the test data RSS. If the model is closer to linear then linear model will have less test RSS and vice versa ### 5. Find ai <img src="5.jpg" width=300 height=100 > ### 6. <h3>Using (3.4), argue that in the case of simple linear regression, the least squares line always passes through the point (x̄, ȳ).</h3> <img src="6.jpg" width> ### 7. <h3>It is claimed in the text that in the case of simple linear regression of Y onto X, the R 2 statistic (3.17) is equal to the square of the correlation between X and Y (3.18). Prove that this is the case. For simplicity, you may assume that x̄ = ȳ = 0.</h3> <img src="7.1.jpg" width=100 height=100>
0.740362
0.984155
![Astrofisica Computacional](../logo.png) --- ## 08. Introduction to `AstroPy`. Coordinates and Tables Eduard Larrañaga (ealarranaga@unal.edu.co) --- ### About this notebook In this notebook we present an introduction to the use coordinates and tables in `astropy`. --- ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np from astropy import units as u ``` ## 1. Coordinates The module [astropy.coordinates](http://docs.astropy.org/en/stable/coordinates/) provides a framework to handle sky positions in various coordinate systems and transformations between them. The basic class to handle sky coordinates is [SkyCoord](http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html), ``` from astropy.coordinates import SkyCoord ``` We can define the position angle for longitude and latitude for a particular source, together with a keyword specifying a coordinate frame. For example, using the International Celestial Reference System 'icrs' and decimal degrees, we define the position of the crab nebula as ``` position_crab = SkyCoord(83.63 * u.deg, 22.01 * u.deg, frame='icrs') position_crab ``` It is also possible to use `lists`, `arrays` or even `Quantities` to define the coordinates, ``` positions = SkyCoord([345., 234.3] * u.deg, [-0.1, 0.2] * u.deg, frame='galactic') positions ``` An interesting option is to define the angular position using strings with the `'hms'` and `'dms'` notation: ``` position_crab = SkyCoord('5h34m31.97s', '22d0m52.10s', frame='icrs') position_crab ``` Alternatively, we can use the argument `unit`, ``` position_crab = SkyCoord('5:34:31.97', '22:0:52.10', unit=(u.hour, u.deg), frame='icrs') position_crab ``` ### 1.1. Catalogues A very convenient and easy way to get the coordinates of a particular source is by using the [Sesame](http://cds.u-strasbg.fr/cgi-bin/Sesame) database with the command `SkyCoord.from_name()`: ``` SkyCoord.from_name('Crab') ``` To access the longitude and latitud angles individually we use the attributes `.lon`and `.lat` ``` position_crab.data.lon position_crab.data.lat ``` ### 1.2. Transformation between cordinate systems In order to transform the coordinates from one coordinate system to another system we can use the command `SkyCoord.transform_to()`, ``` position_crab_galactic = position_crab.transform_to('galactic') position_crab_galactic ``` It is also possible to use the attributes `.galactic` or `.icrs` to perform the transformation: ``` position_crab.galactic position_crab.icrs ``` ### 1.3. Measuring distances between positions in the sky The angular distance between two [SkyCoord](http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html) objects, can be found using the method [SkyCoord.separation()](http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html#astropy.coordinates.SkyCoord.separation). For example, consider the source Sagittarius A* (Sgr A*), at the center of the Milky Way, ``` #position_saga = SkyCoord.from_name('Sag A*') position_SgrA = SkyCoord(0 * u.deg, 0 * u.deg, frame='galactic') position_SgrA ``` The distance from the Crab nebula to Sgr A* is ``` position_crab.separation(position_SgrA) position_crab ``` The inverse proble is also possible. In this case we want to compute a new position in the sky based on a given offset and position angle. For example, from the Crab nebula we can calculate ``` position_crab.directional_offset_by( separation=1 * u.deg, position_angle=0 * u.deg) ``` ### 1.4. 
ALT - AZ coordinates When planning observations, it is convenient to transform the sky coordinates into a position in the horizontal coordinate system, given a location on earth and a time. We will use the functions [astropy.coordinates.Earthlocation](https://docs.astropy.org/en/stable/api/astropy.coordinates.EarthLocation.html) and [astropy.coordinates.AltAz](https://docs.astropy.org/en/stable/api/astropy.coordinates.AltAz.html), ``` from astropy.coordinates import EarthLocation, AltAz from astropy.time import Time ``` The location of Bogotá, Colombia, is ``` Bogota = EarthLocation(lat=4.7110 * u.deg, lon=74.0721 * u.deg) Bogota.geodetic ``` and the local time is calculated using the [Time](http://docs.astropy.org/en/stable/api/astropy.time.Time.html) object: ``` now = Time.now() print(now) ``` Now, we define a horizontal coordinate system using the [AltAz]([docs.astropy.org/en/stable/api/astropy.coordinates.AltAz.html) class and use it to convert from the sky coordinate, ``` altaz = AltAz(obstime=now, location=Bogota) crab_altaz = position_crab.transform_to(altaz) crab_altaz ``` Note that we obtain the alt-az coordinates and aditional information about the local observation conditions. ## 2. Tables Another interesting characteristic of `astropy` is the [Table](http://docs.astropy.org/en/stable/api/astropy.io.votable.tree.Table.html) class. This allows to handle data tables and data in .fits files. ``` from astropy.table import Table ``` Table objects are created by ``` table = Table() ``` We add columns to the table like we would add entries to a dictionary (Note the units for the coordinates!!) ``` table['Source_Name'] = ['Crab', 'Sag A*', 'Cas A', 'Vela Junior'] table['GalLon'] = [184.5575438, 0, 111.74169477, 266.25914205] * u.deg table['GalLat'] = [-5.78427369, 0, -2.13544151, -1.21985818] * u.deg table['Source_Class'] = ['pwn', 'unc', 'snr', 'snr'] ``` By executing the following cell, we get a nicely formatted version of the table printed in the notebook: ``` table ``` ### 2.1. Accessing rows and columns The attribute `.colnames` gives the names of the columns, ``` table.colnames ``` To access individual columns we use their name, ``` table['GalLon'] table[['Source_Name', 'GalLat']] ``` It is also possible to get the column data as [astropy.units.Quantity](http://docs.astropy.org/en/stable/api/astropy.units.Quantity.html#astropy.units.Quantity) using the `.quantity` property, ``` table['GalLon'].quantity ``` Rows can be accessed using numpy indexing, ``` table[0:2] ``` or by using a boolean numpy array for indexing, ``` selection = table['Source_Name'] == 'Crab' table[selection] ``` More information about indexing can be consulted [here](http://docs.astropy.org/en/stable/table/indexing.html). ### 2.2. Indexing and Grouping The method `.add_index()` allows to define an "index column" to access rows by the value contained in the index column. For example, we add the index corresponding to the "Source_Name" column, ``` table.add_index(colnames="Source_Name") ``` Now, it is possibleto access a particular row using the using the `.loc[]` syntax (as `pandas`dataframes): ``` table.loc["Cas A"] table.loc[["Cas A", "Crab"]] ``` It is also possible to group the rows by a given key column. The groups will be defined by the unique values contained in the column defined as key. ``` table_grouped = table.group_by("Source_Class") for group in table_grouped.groups: print(group, "\n") ``` Each `group` created is again a `Table` object: ``` type(group) ``` ### 2.3. 
### 2.3. Reading / Writing tables to files

Astropy tables can be saved in many formats (for details see [here](http://docs.astropy.org/en/latest/io/unified.html#built-in-table-readers-writers)).

```
table.write('example.fits', overwrite=True, format='fits')
Table.read('example.fits')
```

### 2.4. Other operations

Here are some other useful operations when working with Astropy tables.

- Sort by key:

```
table.sort('GalLon')
table
```

Note that `.sort()` is an "in place" operation on the table, i.e. it changes the actual table.

- To remove a specific row by index:

```
table.remove_row(0)
table
```

- Astropy tables also support row-wise iteration in Python loops:

```
for row in table:
    print(row['Source_Name'])
```

- Another useful feature for quickly inspecting the data contained in the table is the `.show_in_browser()` method:

```
table.show_in_browser(jsviewer=True)
```
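To close, a short sketch that ties the two parts of this notebook together (assuming the `table` and the `position_SgrA` coordinate defined above are still in memory): we build a `SkyCoord` directly from the Galactic longitude/latitude columns and store the angular distance to Sgr A* as a new column.

```
# Sketch: the GalLon/GalLat columns carry units, so they can be passed to SkyCoord directly.
coords = SkyCoord(table['GalLon'].quantity, table['GalLat'].quantity, frame='galactic')
table['Sep_SgrA'] = coords.separation(position_SgrA)
table
```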
github_jupyter
%matplotlib inline import matplotlib.pyplot as plt import numpy as np from astropy import units as u from astropy.coordinates import SkyCoord position_crab = SkyCoord(83.63 * u.deg, 22.01 * u.deg, frame='icrs') position_crab positions = SkyCoord([345., 234.3] * u.deg, [-0.1, 0.2] * u.deg, frame='galactic') positions position_crab = SkyCoord('5h34m31.97s', '22d0m52.10s', frame='icrs') position_crab position_crab = SkyCoord('5:34:31.97', '22:0:52.10', unit=(u.hour, u.deg), frame='icrs') position_crab SkyCoord.from_name('Crab') position_crab.data.lon position_crab.data.lat position_crab_galactic = position_crab.transform_to('galactic') position_crab_galactic position_crab.galactic position_crab.icrs #position_saga = SkyCoord.from_name('Sag A*') position_SgrA = SkyCoord(0 * u.deg, 0 * u.deg, frame='galactic') position_SgrA position_crab.separation(position_SgrA) position_crab position_crab.directional_offset_by( separation=1 * u.deg, position_angle=0 * u.deg) from astropy.coordinates import EarthLocation, AltAz from astropy.time import Time Bogota = EarthLocation(lat=4.7110 * u.deg, lon=74.0721 * u.deg) Bogota.geodetic now = Time.now() print(now) altaz = AltAz(obstime=now, location=Bogota) crab_altaz = position_crab.transform_to(altaz) crab_altaz from astropy.table import Table table = Table() table['Source_Name'] = ['Crab', 'Sag A*', 'Cas A', 'Vela Junior'] table['GalLon'] = [184.5575438, 0, 111.74169477, 266.25914205] * u.deg table['GalLat'] = [-5.78427369, 0, -2.13544151, -1.21985818] * u.deg table['Source_Class'] = ['pwn', 'unc', 'snr', 'snr'] table table.colnames table['GalLon'] table[['Source_Name', 'GalLat']] table['GalLon'].quantity table[0:2] selection = table['Source_Name'] == 'Crab' table[selection] table.add_index(colnames="Source_Name") table.loc["Cas A"] table.loc[["Cas A", "Crab"]] table_grouped = table.group_by("Source_Class") for group in table_grouped.groups: print(group, "\n") type(group) table.write('example.fits', overwrite=True, format='fits') Table.read('example.fits') table.sort('GalLon') table table.remove_row(0) table for row in table: print(row['Source_Name']) table.show_in_browser(jsviewer=True)
0.47317
0.984396
<img src="images/dask_horizontal.svg" align="right" width="30%"> # Bag:半结构化数据的并行列表 Dask-bag 擅长处理可以表示为任意输入序列的数据。我们将其称为“杂乱”数据,因为它可能包含复杂的嵌套结构、缺失的字段、数据类型的混合等。 *函数式*编程风格非常适合标准Python迭代,例如可以在`itertools`模块。 当第一次消耗大量原始数据时,在数据处理管道的开始阶段经常会遇到凌乱的数据。初始数据集可能是JSON、CSV、XML或任何其他不强制执行严格结构和数据类型的格式。 出于这个原因,最初的数据按摩和处理通常是用Python的 `list`、`dict`和 `set`来完成的。 这些核心数据结构针对通用存储和处理进行了优化。使用迭代器/生成器表达式或诸如`itertools` 或 [`toolz`](https://toolz.readthedocs.io/en/latest/) 之类的库添加流式计算,让我们可以在小空间内处理大量数据。如果我们将其与并行处理相结合,那么我们可以处理大量数据。 Dask.bag是一个高级Dask集合,用于自动化这种形式的常见工作负载。简而言之, dask.bag = map, filter, toolz + 并行执行 **相关文档** * [Bag 文档](https://docs.dask.org/en/latest/bag.html) * [Bag 屏幕录像](https://youtu.be/-qIiJ1XtSv0) * [Bag API](https://docs.dask.org/en/latest/bag-api.html) * [Bag 样例](https://examples.dask.org/bag.html) ## 创建数据 ``` %run prep.py -d accounts ``` ## 设置 同样,我们将使用分布式调度器。 调度器将在[以后](05_distributed.ipynb)更详细地解释。 ``` from dask.distributed import Client client = Client(n_workers=4) ``` ## 创建 您可以从Python序列、从文件、从S3上的数据等创建`Bag`。 我们演示了使用 `.take()` 来显示数据元素。 (执行`.take(1)` 会生成一个包含一个元素的元组) 注意数据是分块的,每个块有很多项。 在第一个示例中,两个分区各包含五个元素,在接下来的两个示例中,每个文件被划分为一个或多个字节块。 ``` # 每个元素都是一个整数 import dask.bag as db b = db.from_sequence([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], npartitions=2) b.take(3) # 每个元素都是一个文本文件,其中每一行都是一个 JSON 对象 # 注意压缩是自动处理的 import os b = db.read_text(os.path.join('data', 'accounts.*.json.gz')) b.take(1) # 编辑sources.py以配置源位置 import sources sources.bag_url # 需要`s3fs`库 # 每个分区都是一个远程 CSV 文本文件 b = db.read_text(sources.bag_url, storage_options={'anon': True}) b.take(1) ``` ## 操作 `Bag` 对象包含了Python 标准库、`toolz` 或 `pyspark` 等项目中找到的标准功能 API,包括 `map`、`filter`、`groupby` 等。 对`Bag`对象的操作会创建新的`Bag`。 调用`.compute()` 方法来触发执行,正如我们在`Delayed` 对象中看到的那样。 ``` def is_even(n): return n % 2 == 0 b = db.from_sequence([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) c = b.filter(is_even).map(lambda x: x ** 2) c # 阻塞形式:等待完成(在这种情况下非常快) c.compute() ``` ### 示例:Accounts JSON 数据 我们在您的数据目录中创建了一个gzip压缩的JSON 数据的假数据集。 这类似于我们稍后将看到的 `DataFrame` 示例中使用的示例,不同之处在于它将每个单独的 `id` 的所有条目捆绑到单个记录中。 这类似于您可能从文档存储数据库或 Web API 收集的数据。 每一行都是一个 JSON 编码的字典,带有以下键 * id:客户的唯一标识符 * name:客户姓名 * transaction-:`transaction-id`、`amount` 对列表,该文件中客户的每笔交易各一笔。 ``` filename = os.path.join('data', 'accounts.*.json.gz') lines = db.read_text(filename) lines.take(3) ``` 我们的数据以文本行的形式从文件中出来。请注意,文件解压缩是自动进行的。通过将`json.loads`函数映射到bag中,我们可以使这些数据看起来更合理。 ``` import json js = lines.map(json.loads) # take: 检查前几个元素 js.take(3) ``` ### 基本查询 一旦我们将 JSON 数据解析为适当的 Python 对象(`dict`、`list`等),我们就可以通过创建小型Python函数来运行我们的数据,以执行更有趣的查询。 ``` # filter: 仅保留序列的某些元素 js.filter(lambda record: record['name'] == 'Alice').take(5) def count_transactions(d): return {'name': d['name'], 'count': len(d['transactions'])} # map: apply a function to each element (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .take(5)) # pluck: 从字典中选择一个字段, element[field] (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .pluck('count') .take(5)) # 所有 Alice 条目的平均交易数 (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .pluck('count') .mean() .compute()) ``` ### 使用`flatten` 去除嵌套 在下面的示例中,我们看到使用 `.flatten()` 来平铺结果。 我们计算所有 Alice 的所有交易的平均金额。 ``` (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() .pluck('amount') .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() 
.pluck('amount') .mean() .compute()) ``` ### Groupby 和 Foldby 通常我们想通过一些函数或键对数据进行分组。 我们可以使用 `.groupby` 方法来做到这一点,该方法很简单,但会强制对数据进行完整的混洗(代价很高),或者使用更难使用但速度更快的 `.foldby` 方法,它将groupby和归约结合起来进行流式处理 * `groupby`:混洗数据,使所有具有相同键的项目都在相同的键值对中 * `foldby`:遍历数据,每个键累积一个结果 *注意:完整的 groupby 特别糟糕。 在实际工作负载中,如果可能,您最好使用 `foldby` 或切换到 `DataFrame`。* ### `groupby` Groupby收集集合中的项,以便将某些函数下具有相同值的所有项收集到一个键值对中。 ``` b = db.from_sequence(['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']) b.groupby(len).compute() # names grouped by length b = db.from_sequence(list(range(10))) b.groupby(lambda x: x % 2).compute() b.groupby(lambda x: x % 2).starmap(lambda k, v: (k, max(v))).compute() ``` ### `foldby` Foldby初看起来可能很奇怪。它类似于其他库中的以下函数: * [`toolz.reduceby`](http://toolz.readthedocs.io/en/latest/streaming-analytics.html#streaming-split-apply-combine) * [`pyspark.RDD.combineByKey`](http://abshinn.github.io/python/apache-spark/2014/10/11/using-combinebykey-in-apache-spark/) * 使用 `foldby` 时,您提供 1. 对元素进行分组的关键函数 2. 一个二元运算符,比如你会传递给 `reduce` 的,您可以使用该运算符对每个组执行归约 3. 组合二元运算符,可以组合对数据集不同部分的两次`reduce`”`调用的结果。 你的归约必须是关联的。 它会在数据集的每个分区中并行发生。 然后所有这些中间结果将由`combine`二元运算符组合。 ``` b.foldby(lambda x: x % 2, binop=max, combine=max).compute() ``` ### account数据的示例 我们找到同名的人数 ``` %%time # 警告,这将花费一定时间... result = js.groupby(lambda item: item['name']).starmap(lambda k, v: (k, len(v))).compute() print(sorted(result)) %%time # 这个速度相对较快,并产生相同的结果。 from operator import add def incr(tot, _): return tot + 1 result = js.foldby(key='name', binop=incr, initial=0, combine=add, combine_initial=0).compute() print(sorted(result)) ``` ### 练习:计算每个名字的总金额 我们想要groupby (或 foldby) `name` 键, 然后将每个名字的所有金额相加。 步骤 1. 创建一个小函数,给定一个字典 {'name': 'Alice', 'transactions': [{'amount': 1, 'id': 123}, {'amount': 2, 'id': 456}]} 计算amount的总和, 如: `3` 2. 稍微更改上面“foldby”示例的二元运算符,以便二元运算符不计算条目的数量,而是累加数量的总和。 ``` # 将你的代码写在这里。。。 ``` ## DataFrames 出于与 Pandas 通常比纯 Python 更快的相同原因,`dask.dataframe` 可以比 `dask.bag` 更快。 稍后我们将更多地使用 DataFrames,但从 Bag 的角度来看,它通常是数据摄取“混乱”部分的终点 —— 一旦数据可以制成dataframe,然后进行复杂的拆分-应用-合并(`split-apply-combine`) 逻辑将变得更加直接和高效。 您可以使用 `to_dataframe` 方法将具有简单元组或平面字典结构的包转换为 `dask.dataframe`。 ``` df1 = js.to_dataframe() df1.head() ``` 这现在看起来像一个定义良好的 DataFrame,我们可以有效地对其应用类似 Pandas 的计算。 使用 Dask DataFrame,我们事先计算同名人数需要多长时间? 事实证明,`dask.dataframe.groupby()` 比`dask.bag.groupby()` 高出一个数量级以上; 但在这个案例中,它仍然无法比得上`dask.bag.foldby()`。 ``` %time df1.groupby('name').id.count().compute().head() ``` ### 非规范化 这种 DataFrame 格式不是最佳的,因为 `transactions` 列充满了嵌套数据,所以 Pandas 必须恢复到 `object`数据类型,这在 Pandas 中很慢。 理想情况下,我们希望仅在我们将数据展平后才转换为数据帧,以便每条记录都是单独的 `int`、`string`、`float` 等。 ``` def denormalize(record): # 为每个人返回一个列表,每笔交易一个项目 return [{'id': record['id'], 'name': record['name'], 'amount': transaction['amount'], 'transaction-id': transaction['transaction-id']} for transaction in record['transactions']] transactions = js.map(denormalize).flatten() transactions.take(3) df = transactions.to_dataframe() df.head() %%time # 每个名字的交易数量 # 注意这里的时间包括数据加载和摄取 df.groupby('name')['transaction-id'].count().compute() ``` ## 局限性 Bags 提供了非常通用的计算(任何 Python 函数)。这种通用性是有代价的。 Bags 具有以下已知局限性: 1. Bag 操作往往比数组/数据帧计算慢,就像 Python 比 NumPy/Pandas 慢一样 2. ``Bag.groupby`` 很慢。 如果可能,您应该尝试使用`Bag.foldby`。 使用``Bag.foldby`` 需要更多的思考。 更好的是,考虑创建一个规范化的数据框。 ## 了解更多 * [Bag 文档](https://docs.dask.org/en/latest/bag.html) * [Bag 屏幕录像](https://youtu.be/-qIiJ1XtSv0) * [Bag API](https://docs.dask.org/en/latest/bag-api.html) * [Bag 示例](https://examples.dask.org/bag.html) ## 关闭 ``` client.shutdown() ```
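A possible solution sketch for the per-name totals exercise earlier in this notebook (one way to do it, not the reference answer), assuming the `js` bag of parsed account records is still available: it follows the same `foldby` pattern used for counting, but accumulates the sum of transaction amounts instead.

```
from operator import add

def sum_amounts(record):
    # total amount across all transactions of a single account record
    return sum(t['amount'] for t in record['transactions'])

def add_record(total, record):
    # binop: fold one record into the running total for its name
    return total + sum_amounts(record)

totals = js.foldby(key='name',
                   binop=add_record,
                   initial=0,
                   combine=add,          # merge partial sums from different partitions
                   combine_initial=0).compute()
print(sorted(totals)[:3])
```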
github_jupyter
%run prep.py -d accounts from dask.distributed import Client client = Client(n_workers=4) # 每个元素都是一个整数 import dask.bag as db b = db.from_sequence([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], npartitions=2) b.take(3) # 每个元素都是一个文本文件,其中每一行都是一个 JSON 对象 # 注意压缩是自动处理的 import os b = db.read_text(os.path.join('data', 'accounts.*.json.gz')) b.take(1) # 编辑sources.py以配置源位置 import sources sources.bag_url # 需要`s3fs`库 # 每个分区都是一个远程 CSV 文本文件 b = db.read_text(sources.bag_url, storage_options={'anon': True}) b.take(1) def is_even(n): return n % 2 == 0 b = db.from_sequence([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) c = b.filter(is_even).map(lambda x: x ** 2) c # 阻塞形式:等待完成(在这种情况下非常快) c.compute() filename = os.path.join('data', 'accounts.*.json.gz') lines = db.read_text(filename) lines.take(3) import json js = lines.map(json.loads) # take: 检查前几个元素 js.take(3) # filter: 仅保留序列的某些元素 js.filter(lambda record: record['name'] == 'Alice').take(5) def count_transactions(d): return {'name': d['name'], 'count': len(d['transactions'])} # map: apply a function to each element (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .take(5)) # pluck: 从字典中选择一个字段, element[field] (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .pluck('count') .take(5)) # 所有 Alice 条目的平均交易数 (js.filter(lambda record: record['name'] == 'Alice') .map(count_transactions) .pluck('count') .mean() .compute()) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() .pluck('amount') .take(3)) (js.filter(lambda record: record['name'] == 'Alice') .pluck('transactions') .flatten() .pluck('amount') .mean() .compute()) b = db.from_sequence(['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']) b.groupby(len).compute() # names grouped by length b = db.from_sequence(list(range(10))) b.groupby(lambda x: x % 2).compute() b.groupby(lambda x: x % 2).starmap(lambda k, v: (k, max(v))).compute() b.foldby(lambda x: x % 2, binop=max, combine=max).compute() %%time # 警告,这将花费一定时间... result = js.groupby(lambda item: item['name']).starmap(lambda k, v: (k, len(v))).compute() print(sorted(result)) %%time # 这个速度相对较快,并产生相同的结果。 from operator import add def incr(tot, _): return tot + 1 result = js.foldby(key='name', binop=incr, initial=0, combine=add, combine_initial=0).compute() print(sorted(result)) # 将你的代码写在这里。。。 df1 = js.to_dataframe() df1.head() %time df1.groupby('name').id.count().compute().head() def denormalize(record): # 为每个人返回一个列表,每笔交易一个项目 return [{'id': record['id'], 'name': record['name'], 'amount': transaction['amount'], 'transaction-id': transaction['transaction-id']} for transaction in record['transactions']] transactions = js.map(denormalize).flatten() transactions.take(3) df = transactions.to_dataframe() df.head() %%time # 每个名字的交易数量 # 注意这里的时间包括数据加载和摄取 df.groupby('name')['transaction-id'].count().compute() client.shutdown()
0.295027
0.97765
import pandas as pd import numpy as np from sklearn import * from kaggle.competitions import twosigmanews env = twosigmanews.make_env() (market_train, news_train) = env.get_training_data() def data_prep(market_train,news_train): market_train.time = market_train.time.dt.date news_train.time = news_train.time.dt.hour news_train.sourceTimestamp= news_train.sourceTimestamp.dt.hour news_train.firstCreated = news_train.firstCreated.dt.date news_train['assetCodesLen'] = news_train['assetCodes'].map(lambda x: len(eval(x))) news_train['assetCodes'] = news_train['assetCodes'].map(lambda x: list(eval(x))[0]) kcol = ['firstCreated', 'assetCodes'] news_train = news_train.groupby(kcol, as_index=False).mean() market_train = pd.merge(market_train, news_train, how='left', left_on=['time', 'assetCode'], right_on=['firstCreated', 'assetCodes']) lbl = {k: v for v, k in enumerate(market_train['assetCode'].unique())} market_train['assetCodeT'] = market_train['assetCode'].map(lbl) market_train = market_train.dropna(axis=0) return market_train market_train = data_prep(market_train,news_train) # The target is binary up = market_train.returnsOpenNextMktres10 >= 0 fcol = [c for c in market_train if c not in ['assetCode', 'assetCodes', 'assetCodesLen', 'assetName', 'audiences', 'firstCreated', 'headline', 'headlineTag', 'marketCommentary', 'provider', 'returnsOpenNextMktres10', 'sourceId', 'subjects', 'time', 'time_x', 'universe','sourceTimestamp']] # We still need the returns for model tuning X = market_train[fcol].values up = up.values r = market_train.returnsOpenNextMktres10.values # Scaling of X values # It is good to keep these scaling values for later mins = np.min(X, axis=0) maxs = np.max(X, axis=0) rng = maxs - mins X = 1 - ((maxs - X) / rng) # Sanity check assert X.shape[0] == up.shape[0] == r.shape[0] X_train, X_test, up_train, up_test, r_train, r_test\ = model_selection.train_test_split(X, up, r, test_size=0.25, random_state=99) from xgboost import XGBClassifier import time xgb_up = XGBClassifier(n_jobs=4,n_estimators=200,max_depth=8,eta=0.1) t = time.time() print('Fitting Up') xgb_up.fit(X_train,up_train) print(f'Done, time = {time.time() - t}') A side effect of treating this as a binary task is that we can use a simpler metric to judge our models from sklearn.metrics import accuracy_score accuracy_score(xgb_up.predict(X_test),up_test) days = env.get_prediction_days() n_days = 0 prep_time = 0 prediction_time = 0 packaging_time = 0 for (market_obs_df, news_obs_df, predictions_template_df) in days: n_days +=1 print(n_days,end=' ') t = time.time() market_obs_df = data_prep(market_obs_df, news_obs_df) market_obs_df = market_obs_df[market_obs_df.assetCode.isin(predictions_template_df.assetCode)] X_live = market_obs_df[fcol].values X_live = 1 - ((maxs - X_live) / rng) prep_time += time.time() - t t = time.time() lp = xgb_up.predict_proba(X_live) prediction_time += time.time() -t t = time.time() confidence = 2* lp[:,1] -1 preds = pd.DataFrame({'assetCode':market_obs_df['assetCode'],'confidence':confidence}) predictions_template_df = predictions_template_df.merge(preds,how='left').drop('confidenceValue',axis=1).fillna(0).rename(columns={'confidence':'confidenceValue'}) env.predict(predictions_template_df) packaging_time += time.time() - t env.write_submission_file() total = prep_time + prediction_time + packaging_time print(f'Preparing Data: {prep_time:.2f}s') print(f'Making Predictions: {prediction_time:.2f}s') print(f'Packing: {packaging_time:.2f}s') print(f'Total: {total:.2f}s') For good measure, we can check 
what XGBoost bases its decisions on import matplotlib.pyplot as plt %matplotlib inline from xgboost import plot_importance plt.figure(num=None, figsize=(10, 10), dpi=80, facecolor='w', edgecolor='k') plt.bar(range(len(xgb_up.feature_importances_)), xgb_up.feature_importances_) plt.xticks(range(len(xgb_up.feature_importances_)), fcol, rotation='vertical'); # Second Analysis ## Introduction In this competition you will predict how stocks will change based on the market state and news articles. You will loop through a long series of trading days; for each day, you'll receive an updated state of the market, and a series of news articles which were published since the last trading day, along with impacted stocks and sentiment analysis. You'll use this information to predict whether each stock will have increased or decreased ten trading days into the future. Once you make these predictions, you can move on to the next trading day. ## TL;DR: End-to-End Usage Example ``` ## Libraris import pandas as pd import numpy as np import time import lightgbm as lgb from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold import matplotlib.pyplot as plt import seaborn as sns plt.style.use('seaborn') sns.set(font_scale=2) import warnings warnings.filterwarnings('ignore') import os ``` In the data file description, About this file This is just a sample of the market data. You should not use this data directly. Instead, call env.get_training_data() from the twosigmanews package to get the full training sets in your Kernel. So you download directly below. I am using DJ sterling kernel(https://www.kaggle.com/dster/two-sigma-news-official-getting-started-kernel) thnaks ``` from kaggle.competitions import twosigmanews # You can only call make_env() once, so don't lose it! env = twosigmanews.make_env() print('Done!') ``` ### Load train_data ``` (market_train_df, news_train_df) = env.get_training_data() display("In total: ", market_train_df.shape) market_train_df.head() display("In total: ", news_train_df.shape) news_train_df.head() ``` ### Load test_data ``` days = env.get_prediction_days() (market_obs_df, news_obs_df, predictions_template_df) = next(days) print("Total: ", market_obs_df.shape) market_obs_df.head() print("In total: ", news_obs_df.shape) news_obs_df.head() print("In market_train_df: ", market_train_df.shape);print("In market_obs_df: ", market_obs_df.shape); print("In news_train_df: ", news_train_df.shape);print("In news_obs_df: ", news_obs_df.shape) ``` market_train data is about 40.7 million( 2007-02-01 ~ 2016-12-30 ) , market_test is about 1800 ( 2017-01-03 ). Similarly, news is similar. The other two variables above are missing assetName anduniverse. ## Data Description ### Market data Market data The data includes a subset of US-listed instruments. The set of included instruments changes daily and is determined based on the amount traded and the availability of information. This means that there may be instruments that enter and leave this subset of data. There may therefore be gaps in the data provided, and this does not necessarily imply that that data does not exist (those rows are likely not included due to the selection criteria). The marketdata contains a variety of returns calculated over different timespans. 
All of the returns in this set of marketdata have these properties: - Returns are always calculated either open-to-open (from the opening time of one trading day to the open of another) or close-to-close (from the closing time of one trading day to the open of another). - Returns are either raw, meaning that the data is not adjusted against any benchmark, or market-residualized (Mktres), meaning that the movement of the market as a whole has been accounted for, leaving only movements inherent to the instrument. - Returns can be calculated over any arbitrary interval. Provided here are 1 day and 10 day horizons. - Returns are tagged with 'Prev' if they are backwards looking in time, or 'Next' if forwards looking. Within the marketdata, you will find the following columns: - time(datetime64[ns, UTC]) - the current time (in marketdata, all rows are taken at 22:00 UTC) - assetCode(object) - a unique id of an asset -assetName(category) - the name that corresponds to a group of assetCodes. These may be "Unknown" if the corresponding - assetCode does not have any rows in the news data. - universe(float64) - a boolean indicating whether or not the instrument on that day will be included in scoring. This value is not provided outside of the training data time period. The trading universe on a given date is the set of instruments that are avilable for trading (the scoring function will not consider instruments that are not in the trading universe). The trading universe changes daily. - volume(float64) - trading volume in shares for the day - close(float64) - the close price for the day (not adjusted for splits or dividends) - open(float64) - the open price for the day (not adjusted for splits or dividends) - returnsClosePrevRaw1(float64) - see returns explanation above - returnsOpenPrevRaw1(float64) - see returns explanation above - returnsClosePrevMktres1(float64) - see returns explanation above - returnsOpenPrevMktres1(float64) - see returns explanation above - returnsClosePrevRaw10(float64) - see returns explanation above - returnsOpenPrevRaw10(float64) - see returns explanation above - returnsClosePrevMktres10(float64) - see returns explanation above - returnsOpenPrevMktres10(float64) - see returns explanation above - returnsOpenNextMktres10(float64) - 10 day, market-residualized return. This is the target variable used in competition scoring. The market data has been filtered such that returnsOpenNextMktres10 is always not null. ### News data The news data contains information at both the news article level and asset level (in other words, the table is intentionally not normalized). - time(datetime64[ns, UTC]) - UTC timestamp showing when the data was available on the feed (second precision) - sourceTimestamp(datetime64[ns, UTC]) - UTC timestamp of this news item when it was created - firstCreated(datetime64[ns, UTC]) - UTC timestamp for the first version of the item - sourceId(object) - an Id for each news item - headline(object) - the item's headline - urgency(int8) - differentiates story types (1: alert, 3: article) - takeSequence(int16) - the take sequence number of the news item, starting at 1. For a given story, alerts and articles have - separate sequences. - provider(category) - identifier for the organization which provided the news item (e.g. RTRS for Reuters News, BSW for - Business Wire) - subjects(category) - topic codes and company identifiers that relate to this news item. Topic codes describe the news item's - subject matter. 
These can cover asset classes, geographies, events, industries/sectors, and other types. - audiences(category) - identifies which desktop news product(s) the news item belongs to. They are typically tailored to specific - audiences. (e.g. "M" for Money International News Service and "FB" for French General News Service) - bodySize(int32) - the size of the current version of the story body in characters - companyCount(int8) - the number of companies explicitly listed in the news item in the subjects field - headlineTag(object) - the Thomson Reuters headline tag for the news item - marketCommentary(bool) - boolean indicator that the item is discussing general market conditions, such as "After the Bell" summaries - sentenceCount(int16) - the total number of sentences in the news item. Can be used in conjunction with firstMentionSentence to determine the relative position of the first mention in the item. - wordCount(int32) - the total number of lexical tokens (words and punctuation) in the news item - assetCodes(category) - list of assets mentioned in the item - assetName(category) - name of the asset - firstMentionSentence(int16) - the first sentence, starting with the headline, in which the scored asset is mentioned. - 1: headline - 2: first sentence of the story body - 3: second sentence of the body, etc - 0: the asset being scored was not found in the news item's headline or body text. As a result, the entire news item's text (headline + body) will be used to determine the sentiment score. - relevance(float32) - a decimal number indicating the relevance of the news item to the asset. It ranges from 0 to 1. If the asset is mentioned in the headline, the relevance is set to 1. When the item is an alert (urgency == 1), relevance should be gauged by firstMentionSentence instead. - sentimentClass(int8) - indicates the predominant sentiment class for this news item with respect to the asset. The indicated class is the one with the highest probability. - sentimentNegative(float32) - probability that the sentiment of the news item was negative for the asset - sentimentNeutral(float32) - probability that the sentiment of the news item was neutral for the asset - sentimentPositive(float32) - probability that the sentiment of the news item was positive for the asset - sentimentWordCount(int32) - the number of lexical tokens in the sections of the item text that are deemed relevant to the asset. This can be used in conjunction with wordCount to determine the proportion of the news item discussing the asset. - noveltyCount12H(int16) - The 12 hour novelty of the content within a news item on a particular asset. It is calculated by comparing it with the asset-specific text over a cache of previous news items that contain the asset. - noveltyCount24H(int16) - same as above, but for 24 hours - noveltyCount3D(int16) - same as above, but for 3 days - noveltyCount5D(int16) - same as above, but for 5 days - noveltyCount7D(int16) - same as above, but for 7 days - volumeCounts12H(int16) - the 12 hour volume of news for each asset. A cache of previous news items is maintained and the number of news items that mention the asset within each of five historical periods is calculated. 
- volumeCounts24H(int16) - same as above, but for 24 hours - volumeCounts3D(int16) - same as above, but for 3 days - volumeCounts5D(int16) - same as above, but for 5 days - volumeCounts7D(int16) - same as above, but for 7 days ## Data Cleaning and Exploration ### Null Data #### Market ``` percent = (100 * market_train_df.isnull().sum() / market_train_df.shape[0]).sort_values(ascending=False) percent.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by market_obs_df", fontsize = 20) percent1 = (100 * market_obs_df.isnull().sum() / market_obs_df.shape[0]).sort_values(ascending=False) percent1.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by market_obs_df", fontsize = 20) ``` The types of missing values are the same, but the percentage is slightly different. market_train_df : { returnsOpenPrevMktres10 : 2.284680 , returnsClosePrevMktres10 : 2.283599, returnsOpenPrevMktres1 : 0.392540 , returnsClosePrevMktres1 : 0.392344 } market_obs_df : { returnsClosePrevMktres10 : 2.029622, returnsOpenPrevMktres10 : 2.029622, returnsOpenPrevMktres1 : 0.658256, returnsClosePrevMktres1 : 0.658256 } #### News ``` news_train_df['headlineTag'].unique()[0:5] ``` As shown above,, ```''``` is recognized as object. So we have to change these values as missing. ``` # '' convert to NA for i in news_train_df.columns.values.tolist(): # Does NaN means no numbers, can '' be replaced with nan? I do not know this part. news_train_df[i] = news_train_df[i].replace('', np.nan) news_train_df['headlineTag'].unique()[0:5] # I think it would be faster if you just replace object and categorical variables(not int,float). How do I fix the code? percent = (100 * news_train_df.isnull().sum() / news_train_df.shape[0]).sort_values(ascending=False) percent.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by news_train_df", fontsize = 20) # '' convert to NA for i in news_obs_df.columns.values.tolist(): # Does NaN means no numbers, can '' be replaced with nan? I do not know this part. news_obs_df[i] = news_obs_df[i].replace('', np.nan) percent1 = (100 * news_obs_df.isnull().sum() / news_obs_df.shape[0]).sort_values(ascending=False) percent1.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by news_obs_df", fontsize = 20) ``` - headlineTag, both the train and the test are close to 70% missing. - headline has some missing values, but not the test. ### Number of unique values
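The notebook is cut off right after this heading; a hedged sketch of the kind of check it announces (assuming the `market_train_df` and `news_train_df` frames loaded earlier) is simply the number of distinct values per column:

```
# Sketch: cardinality of each column, useful for spotting identifiers vs. categorical fields.
print(market_train_df.nunique().sort_values(ascending=False))
print(news_train_df[['sourceId', 'assetName', 'provider', 'headlineTag']].nunique())
```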
github_jupyter
## Libraris import pandas as pd import numpy as np import time import lightgbm as lgb from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold import matplotlib.pyplot as plt import seaborn as sns plt.style.use('seaborn') sns.set(font_scale=2) import warnings warnings.filterwarnings('ignore') import os from kaggle.competitions import twosigmanews # You can only call make_env() once, so don't lose it! env = twosigmanews.make_env() print('Done!') (market_train_df, news_train_df) = env.get_training_data() display("In total: ", market_train_df.shape) market_train_df.head() display("In total: ", news_train_df.shape) news_train_df.head() days = env.get_prediction_days() (market_obs_df, news_obs_df, predictions_template_df) = next(days) print("Total: ", market_obs_df.shape) market_obs_df.head() print("In total: ", news_obs_df.shape) news_obs_df.head() print("In market_train_df: ", market_train_df.shape);print("In market_obs_df: ", market_obs_df.shape); print("In news_train_df: ", news_train_df.shape);print("In news_obs_df: ", news_obs_df.shape) percent = (100 * market_train_df.isnull().sum() / market_train_df.shape[0]).sort_values(ascending=False) percent.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by market_obs_df", fontsize = 20) percent1 = (100 * market_obs_df.isnull().sum() / market_obs_df.shape[0]).sort_values(ascending=False) percent1.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by market_obs_df", fontsize = 20) news_train_df['headlineTag'].unique()[0:5] # '' convert to NA for i in news_train_df.columns.values.tolist(): # Does NaN means no numbers, can '' be replaced with nan? I do not know this part. news_train_df[i] = news_train_df[i].replace('', np.nan) news_train_df['headlineTag'].unique()[0:5] # I think it would be faster if you just replace object and categorical variables(not int,float). How do I fix the code? percent = (100 * news_train_df.isnull().sum() / news_train_df.shape[0]).sort_values(ascending=False) percent.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by news_train_df", fontsize = 20) # '' convert to NA for i in news_obs_df.columns.values.tolist(): # Does NaN means no numbers, can '' be replaced with nan? I do not know this part. news_obs_df[i] = news_obs_df[i].replace('', np.nan) percent1 = (100 * news_obs_df.isnull().sum() / news_obs_df.shape[0]).sort_values(ascending=False) percent1.plot(kind="bar", figsize = (20,10), fontsize = 20) plt.xlabel("Columns", fontsize = 20) plt.ylabel("Value Percent(%)", fontsize = 20) plt.title("Total Missing Value by news_obs_df", fontsize = 20)
0.42668
0.592254
# gpvolve Current functionality in `gpvolve.markov` and `gpvolve.simulate`. ``` import gpmap import gpvolve %matplotlib inline import numpy as np import pandas as pd from matplotlib import pyplot as plt ``` #### Generate a rough mount fuji map with 5 sites that can each have 2 states ``` gpm = gpmap.simulate.generate_fuji(num_sites=5,num_states_per_site=2,gpm_output_column="fitness",roughness=0.1) gpm.data.loc[gpm.data.fitness < 0,"fitness"] = 0.0 gpm.get_neighbors() ``` #### Plot map ``` G = gpmap.GenotypePhenotypeGraph() G.add_gpm(gpm) G.add_node_cmap(data_column="fitness") G.add_node_labels(data_column="binary") G.edge_options["arrows"] = True G.node_options["node_size"] = 600 fig, ax = plt.subplots(1,2,figsize=(14,7)) s = np.array([f"{b}" for b in gpm.data.binary]) ax[0].plot(gpm.fitness,color="gray") ax[0].plot(gpm.fitness,"o",color="gray") for i in range(len(gpm.data)): y_pos = np.min(gpm.fitness) - 0.1*np.abs(np.min(gpm.fitness)) #ax[0].text(i,y_pos,gpm.data.binary.iloc[i],rotation="vertical") ax[0].set_ylim(np.min(gpm.fitness)-0.15*np.abs(np.min(gpm.fitness)),np.max(gpm.fitness)*1.05) _ = gpmap.plot(G,ax=ax[1],plot_node_labels=True) for i in range(2): ax[i].spines['right'].set_visible(False) ax[i].spines['top'].set_visible(False) ax[i].spines['left'].set_visible(False) ax[i].spines['bottom'].set_visible(False) ax[0].spines["left"].set_visible(True) ax[0].set_ylabel("fitness") ax[0].xaxis.set_visible(False) ax[0].set_title("genotype fitness values") ax[1].set_title("genotype-fitness map") fig.tight_layout() ``` #### Calculating stochastic transition matrices These give the relative probability that genotype $i$ transitions to its neighboring gentoypes. Each row sums to one and can include a substantial probability that genotype $i$ does not transition at all. ``` # To calculate T, first get c-friendly representation of neigbors neighbor_slicer,neighbors = gpvolve.utils.flatten_neighbors(gpm) # Then calcualte transition matrix given fitness in gpm, neighbors, a fixation model, and a population size # (fixation_model can be moran, mcclandish, or sswm (strong-selection, weak mutation)). T = gpvolve.markov.base.generate_tmatrix(gpm.fitness,neighbor_slicer,neighbors, fixation_model="moran", population_size=10) print("T is a row-stochastic matrix") print("T dimensions:",T.shape) print("column sums:",np.sum(T,axis=0)) print("row sums:",np.sum(T,axis=1)) ## These two functions are useful for illustrating how matrices work import warnings def get_stationary(T): """ Get the stationary distribution for the transition matrix. (The stationary distribution gives the equilibrium population of every genotype). 
Parameters ---------- T : np.ndarray num_genotypes x num_genotypes transition matrix Returns ------- s : np.ndarray stationary distribution for T """ if not isinstance(T,np.ndarray): err = "T must be a numpy array\n" raise TypeError(err) if T.shape[0] != T.shape[1]: err = "T must be square!\n" raise ValueError(err) if np.sum(np.isclose(np.sum(T,axis=1),np.ones(T.shape[0],dtype=float))) != T.shape[0]: err = "All vectors in T along axis 1 must sum to zero (row stochastic)\n" raise ValueError(err) A = np.ones((T.shape[0]+1,T.shape[0]),dtype=float) A[:T.shape[0],:T.shape[0]:] = np.transpose(T)-np.identity(T.shape[0]) b = np.zeros(T.shape[0] + 1,dtype=float) b[-1] = 1 b = np.transpose(b) failed = False try: stationary = np.linalg.solve(np.transpose(A).dot(A), np.transpose(A).dot(b)) clean_up_mask = np.logical_and(np.isclose(stationary,0),stationary < 0) stationary[clean_up_mask] = 0.0 stationary = stationary/np.sum(stationary) except np.linalg.LinAlgError: failed = True # Failed or got a negative value. Try an alternate approach if failed or np.sum(stationary < 0) > 0: evals, evecs = np.linalg.eig(T.T) evec1 = evecs[:,np.isclose(evals,1)] evec1 = evec1[:,0] clean_up_mask = np.logical_and(np.isclose(stationary,0),evec1 < 0) evec1[clean_up_mask] = 0.0 if np.sum(evec1 < 0) > 0: w = "not all stationary state values are positive!\n" warnings.warn(w) stationary = (evec1/np.sum(evec1)).real return stationary def get_and_show_T(gpm,fixation_model,population_size): """ Calculate a transition matrix from a gpm given a fixation_model and population size. """ neighbor_slicer,neighbors = gpvolve.utils.flatten_neighbors(gpm) T = gpvolve.markov.base.generate_tmatrix(gpm.fitness,neighbor_slicer,neighbors, fixation_model=fixation_model, population_size=population_size) stationary = get_stationary(T) fig, ax = plt.subplots(1,4,figsize=(16,4)) ax[0].imshow(T) ax[1].plot(gpm.fitness,"o") ax[2].plot(stationary,"o") ax[3].plot(gpm.fitness,stationary,"o") ax[0].set_xlabel("genotype") ax[0].set_ylabel("genotype") ax[0].set_title("transition matrix") ax[1].set_xlabel("genotype") ax[1].set_ylabel("fitness") ax[1].set_title("genotype fitness") ax[2].set_xlabel("genotype") ax[2].set_ylabel("relative population") ax[2].set_title("equilibrium population") if np.isclose(np.min(stationary),np.max(stationary)): ax[2].set_ylim((stationary[0]*.95,stationary[0]*1.05)) ax[3].set_xlabel("fitness") ax[3].set_ylabel("relative population") ax[3].set_title("fitness vs. population") if np.isclose(np.min(stationary),np.max(stationary)): ax[3].set_ylim((stationary[0]*.95,stationary[0]*1.05)) fig.suptitle(f"fixation = {fixation_model}, N = {population_size}") plt.tight_layout() plt.show() return T ``` #### Demonstrate features of matrix Notice: as the population size goes up, the transition matrix becomes more sparse because the probability of each move depends on fitness difference as well as connectivity. The equilibrium populations also become more and more concentrated on a single genotype. (At higher populations, we can start to see numerical errors in the equilibrium population estimate because there is zero probability of a genotype leaving the high-fitness peaks) ``` for N in [1,3,10,30,100]: T = get_and_show_T(gpm,fixation_model="moran",population_size=N) plt.show() ``` ### Simulation code Run a Wright-Fisher simulation (haploid) starting with 1000 individuals at the lowest fitness genotype in the map. Use a mutation probability of 0.001/replication event (average of one mutant/generation given population size). 
These simulations randomly select genotypes that reproduce, so will give different outcomes every time. ``` # Put 1000 individuals at lowest fitness genotype minimum = gpm.data.iloc[np.argmin(gpm.data.fitness),:].name gpm.data.loc[:,"initial_pop"] = np.zeros(len(gpm.data),dtype=int) gpm.data.loc[minimum,"initial_pop"] = 1000 # Simulate with mutation rate 0.001 (average of one mut/generation with pop size of 1000) pops = gpvolve.simulate.simulate(gpm,initial_pop_column="initial_pop",mutation_rate=0.001,num_steps=10000) # Plot only genotypes populated over the simulation populated = np.sum(pops,axis=0) > 0 pops2 = pops[:,populated] # Plot results fig, ax = plt.subplots(1,3,figsize=(15,6)) ax[0].plot(gpm.fitness[populated],np.arange(len(gpm.data.fitness),dtype=int)[populated],"o") ax[0].set_xlabel("fitness") ax[0].set_ylabel("genotype") ax[2].set_title("genotype fitness") ax[1].imshow(pops2.T[:,:101],aspect="auto",origin="lower") ax[1].set_ylabel("genotype") ax[1].set_xlabel("steps") ax[1].set_title("short time dynamics") ax[2].imshow(pops2.T[:,:],aspect="auto",origin="lower") ax[2].set_ylabel("genotype") ax[2].set_xlabel("steps") ax[2].set_title("long time dynamics") fig.tight_layout() plt.show() ```
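As a closing note on why larger populations sharpen the dynamics seen above, here is a small sketch of the textbook Moran fixation probability for a single mutant with relative fitness r = f_new / f_old. Whether `generate_tmatrix` uses exactly this parameterization internally is an assumption here; the sketch is only meant to illustrate the trend.

```
def moran_fixation(f_old, f_new, N):
    # Textbook Moran fixation probability of a single mutant with relative
    # fitness r = f_new/f_old in a population of size N.
    r = f_new / f_old
    if np.isclose(r, 1.0):
        return 1.0 / N                      # neutral limit
    return (1 - 1/r) / (1 - 1/r**N)

# A 10% beneficial mutant fixes more reliably as N grows, while a 10% deleterious
# one is purged -- mirroring the sparser transition matrices and more peaked
# stationary distributions shown above.
for N in [1, 3, 10, 30, 100]:
    print(N, moran_fixation(1.0, 1.1, N), moran_fixation(1.0, 0.9, N))
```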
github_jupyter
import gpmap import gpvolve %matplotlib inline import numpy as np import pandas as pd from matplotlib import pyplot as plt gpm = gpmap.simulate.generate_fuji(num_sites=5,num_states_per_site=2,gpm_output_column="fitness",roughness=0.1) gpm.data.loc[gpm.data.fitness < 0,"fitness"] = 0.0 gpm.get_neighbors() G = gpmap.GenotypePhenotypeGraph() G.add_gpm(gpm) G.add_node_cmap(data_column="fitness") G.add_node_labels(data_column="binary") G.edge_options["arrows"] = True G.node_options["node_size"] = 600 fig, ax = plt.subplots(1,2,figsize=(14,7)) s = np.array([f"{b}" for b in gpm.data.binary]) ax[0].plot(gpm.fitness,color="gray") ax[0].plot(gpm.fitness,"o",color="gray") for i in range(len(gpm.data)): y_pos = np.min(gpm.fitness) - 0.1*np.abs(np.min(gpm.fitness)) #ax[0].text(i,y_pos,gpm.data.binary.iloc[i],rotation="vertical") ax[0].set_ylim(np.min(gpm.fitness)-0.15*np.abs(np.min(gpm.fitness)),np.max(gpm.fitness)*1.05) _ = gpmap.plot(G,ax=ax[1],plot_node_labels=True) for i in range(2): ax[i].spines['right'].set_visible(False) ax[i].spines['top'].set_visible(False) ax[i].spines['left'].set_visible(False) ax[i].spines['bottom'].set_visible(False) ax[0].spines["left"].set_visible(True) ax[0].set_ylabel("fitness") ax[0].xaxis.set_visible(False) ax[0].set_title("genotype fitness values") ax[1].set_title("genotype-fitness map") fig.tight_layout() # To calculate T, first get c-friendly representation of neigbors neighbor_slicer,neighbors = gpvolve.utils.flatten_neighbors(gpm) # Then calcualte transition matrix given fitness in gpm, neighbors, a fixation model, and a population size # (fixation_model can be moran, mcclandish, or sswm (strong-selection, weak mutation)). T = gpvolve.markov.base.generate_tmatrix(gpm.fitness,neighbor_slicer,neighbors, fixation_model="moran", population_size=10) print("T is a row-stochastic matrix") print("T dimensions:",T.shape) print("column sums:",np.sum(T,axis=0)) print("row sums:",np.sum(T,axis=1)) ## These two functions are useful for illustrating how matrices work import warnings def get_stationary(T): """ Get the stationary distribution for the transition matrix. (The stationary distribution gives the equilibrium population of every genotype). Parameters ---------- T : np.ndarray num_genotypes x num_genotypes transition matrix Returns ------- s : np.ndarray stationary distribution for T """ if not isinstance(T,np.ndarray): err = "T must be a numpy array\n" raise TypeError(err) if T.shape[0] != T.shape[1]: err = "T must be square!\n" raise ValueError(err) if np.sum(np.isclose(np.sum(T,axis=1),np.ones(T.shape[0],dtype=float))) != T.shape[0]: err = "All vectors in T along axis 1 must sum to zero (row stochastic)\n" raise ValueError(err) A = np.ones((T.shape[0]+1,T.shape[0]),dtype=float) A[:T.shape[0],:T.shape[0]:] = np.transpose(T)-np.identity(T.shape[0]) b = np.zeros(T.shape[0] + 1,dtype=float) b[-1] = 1 b = np.transpose(b) failed = False try: stationary = np.linalg.solve(np.transpose(A).dot(A), np.transpose(A).dot(b)) clean_up_mask = np.logical_and(np.isclose(stationary,0),stationary < 0) stationary[clean_up_mask] = 0.0 stationary = stationary/np.sum(stationary) except np.linalg.LinAlgError: failed = True # Failed or got a negative value. 
Try an alternate approach if failed or np.sum(stationary < 0) > 0: evals, evecs = np.linalg.eig(T.T) evec1 = evecs[:,np.isclose(evals,1)] evec1 = evec1[:,0] clean_up_mask = np.logical_and(np.isclose(stationary,0),evec1 < 0) evec1[clean_up_mask] = 0.0 if np.sum(evec1 < 0) > 0: w = "not all stationary state values are positive!\n" warnings.warn(w) stationary = (evec1/np.sum(evec1)).real return stationary def get_and_show_T(gpm,fixation_model,population_size): """ Calculate a transition matrix from a gpm given a fixation_model and population size. """ neighbor_slicer,neighbors = gpvolve.utils.flatten_neighbors(gpm) T = gpvolve.markov.base.generate_tmatrix(gpm.fitness,neighbor_slicer,neighbors, fixation_model=fixation_model, population_size=population_size) stationary = get_stationary(T) fig, ax = plt.subplots(1,4,figsize=(16,4)) ax[0].imshow(T) ax[1].plot(gpm.fitness,"o") ax[2].plot(stationary,"o") ax[3].plot(gpm.fitness,stationary,"o") ax[0].set_xlabel("genotype") ax[0].set_ylabel("genotype") ax[0].set_title("transition matrix") ax[1].set_xlabel("genotype") ax[1].set_ylabel("fitness") ax[1].set_title("genotype fitness") ax[2].set_xlabel("genotype") ax[2].set_ylabel("relative population") ax[2].set_title("equilibrium population") if np.isclose(np.min(stationary),np.max(stationary)): ax[2].set_ylim((stationary[0]*.95,stationary[0]*1.05)) ax[3].set_xlabel("fitness") ax[3].set_ylabel("relative population") ax[3].set_title("fitness vs. population") if np.isclose(np.min(stationary),np.max(stationary)): ax[3].set_ylim((stationary[0]*.95,stationary[0]*1.05)) fig.suptitle(f"fixation = {fixation_model}, N = {population_size}") plt.tight_layout() plt.show() return T for N in [1,3,10,30,100]: T = get_and_show_T(gpm,fixation_model="moran",population_size=N) plt.show() # Put 1000 individuals at lowest fitness genotype minimum = gpm.data.iloc[np.argmin(gpm.data.fitness),:].name gpm.data.loc[:,"initial_pop"] = np.zeros(len(gpm.data),dtype=int) gpm.data.loc[minimum,"initial_pop"] = 1000 # Simulate with mutation rate 0.001 (average of one mut/generation with pop size of 1000) pops = gpvolve.simulate.simulate(gpm,initial_pop_column="initial_pop",mutation_rate=0.001,num_steps=10000) # Plot only genotypes populated over the simulation populated = np.sum(pops,axis=0) > 0 pops2 = pops[:,populated] # Plot results fig, ax = plt.subplots(1,3,figsize=(15,6)) ax[0].plot(gpm.fitness[populated],np.arange(len(gpm.data.fitness),dtype=int)[populated],"o") ax[0].set_xlabel("fitness") ax[0].set_ylabel("genotype") ax[2].set_title("genotype fitness") ax[1].imshow(pops2.T[:,:101],aspect="auto",origin="lower") ax[1].set_ylabel("genotype") ax[1].set_xlabel("steps") ax[1].set_title("short time dynamics") ax[2].imshow(pops2.T[:,:],aspect="auto",origin="lower") ax[2].set_ylabel("genotype") ax[2].set_xlabel("steps") ax[2].set_title("long time dynamics") fig.tight_layout() plt.show()
0.475362
0.912006
<a href="https://www.kaggle.com/concaption/base-model?scriptVersionId=84673217" target="_blank"><img align="left" alt="Kaggle" title="Open in Kaggle" src="https://kaggle.com/static/images/open-in-kaggle.svg"></a> <a href="https://colab.research.google.com/github/concaption/PepsiCo-Lab-Potato-Quality-Control/blob/main/Potato_Starter_Code_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import numpy as np import datetime import tensorflow as tf from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.layers import Flatten, Dense, Dropout from tensorflow.keras import Model from keras.preprocessing.image import ImageDataGenerator import matplotlib.pyplot as plt # -- Global Variables -- TRAIN_PATH = '../input/pepsico-lab-potato-quality-control/Pepsico RnD Potato Lab Dataset/Train' TEST_PATH = '../input/pepsico-lab-potato-quality-control/Pepsico RnD Potato Lab Dataset/Test' BATCH_SIZE = 32 COLOR_MODE = 'rgb' TARGET_SIZE = (255, 255) GRAY_SCALL = (3,) INPUT_SIZE = TARGET_SIZE + GRAY_SCALL EPOCHS = 10 CLASSES = ['Defective','Non-Defective'] # -- Data Normalization -- data_generator = ImageDataGenerator(samplewise_center=True, #making sure that each image has a mean of 0 samplewise_std_normalization=True, #and standard deviation 1 horizontal_flip=True, #Randomly flip inputs horizontally validation_split=0.3, ) # -- Data iterators -- train_data = data_generator.flow_from_directory(directory=TRAIN_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, subset='training', shuffle=True) validation_data = data_generator.flow_from_directory(directory=TRAIN_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, subset='validation', shuffle=True) test_data = data_generator.flow_from_directory(directory=TEST_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, shuffle=True) # -- plot random batch -- images, labels = train_data.next() classes = np.asarray(CLASSES) _, axs = plt.subplots(4, 8, figsize=(12,12)) axs = axs.flatten() for img, label, ax in zip(images, labels, axs): ax.imshow(img) ax.axis('off') label = label.astype(int) ax.set_title(classes[label == 1]) plt.show() def my_model(): vgg19_model = VGG19(weights='imagenet',include_top=False,input_shape=INPUT_SIZE) vgg19_model.trainable = False flatten =Flatten()(vgg19_model.layers[-1].output) fc1 = Dense(units=4096, activation ='relu')(flatten) dropout = Dropout(0.2)(fc1) fc2 = Dense(units=1024,activation='relu')(dropout) output = Dense(2, activation='softmax')(fc2) model = Model(inputs = vgg19_model.input, outputs=output) model.summary() return model model = my_model() tf.keras.utils.plot_model( model, to_file='model.png', show_shapes=True, show_dtype=False, show_layer_names=True, rankdir='T', expand_nested=False, dpi=96 ) # -- Define optimizer and loss -- opt = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) loss = tf.keras.losses.CategoricalCrossentropy() # -- Compile model -- model.compile(optimizer=opt, loss=loss, metrics=['accuracy']) # -- Callbacks -- checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='my_model.h5', monitor='accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch') earlystoping = tf.keras.callbacks.EarlyStopping(monitor='accuracy', min_delta=0, patience=5, #Number of epochs with no improvement after 
which training will be stopped. verbose=1, mode='auto') log_dir = './logs/fit/' + datetime.datetime.now().strftime('%m.%d.%Y--%H-%M-%S') tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=False, update_freq='epoch') # -- Train model -- history = model.fit(x=train_data, epochs=EPOCHS, steps_per_epoch=len(train_data), verbose=1, validation_data=validation_data, validation_steps=1) # -- Save model -- model.save('my_model.h5') def learning_curves(history): '''plot learning curves''' acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] plt.figure(figsize=(10, 8)) plt.subplot(2, 1, 1) plt.plot(acc, label='Training Accuracy') plt.plot(val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.ylabel('Accuracy') plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(loss, label='Training Loss') plt.plot(val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.ylabel('Loss - Cross Entropy') plt.xlabel('epoch') plt.ylim([0,1.6]) plt.title('Training and Validation Loss') plt.show() # -- Plot learning curves -- learning_curves(history) # -- Evaluate the model on the test data -- loss, accuracy = model.evaluate(x=test_data) print("test loss: ", loss, ", test acc: " , 100*accuracy, "%") def defective_or_not(img_path): img = tf.keras.preprocessing.image.load_img(img_path, target_size=(255,255,3)) img = np.asarray(img) img = np.expand_dims(img, axis=0) model = tf.keras.models.load_model('my_model.h5') output = model.predict(img) print(classes[output[0]==1]) # defective_or_not(image.png) ```
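One caveat about `defective_or_not` above: `model.predict` returns softmax probabilities, so the comparison `output[0] == 1` will almost never match. Below is a hedged variant using `argmax` (same assumed 255x255 RGB input; the per-image standardization is only a rough stand-in for the `ImageDataGenerator` settings used during training, and `some_potato_chip.png` is a placeholder path):

```
def defective_or_not_argmax(img_path, model_path='my_model.h5'):
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=(255, 255))
    img = np.asarray(img, dtype='float32')
    # roughly mimic samplewise_center / samplewise_std_normalization from training
    img = (img - img.mean()) / (img.std() + 1e-7)
    img = np.expand_dims(img, axis=0)
    model = tf.keras.models.load_model(model_path)
    probs = model.predict(img)[0]
    idx = int(np.argmax(probs))
    print(f"{classes[idx]} (p={probs[idx]:.3f})")
    return classes[idx]

# defective_or_not_argmax('some_potato_chip.png')
```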
github_jupyter
import numpy as np import datetime import tensorflow as tf from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.layers import Flatten, Dense, Dropout from tensorflow.keras import Model from keras.preprocessing.image import ImageDataGenerator import matplotlib.pyplot as plt # -- Global Variables -- TRAIN_PATH = '../input/pepsico-lab-potato-quality-control/Pepsico RnD Potato Lab Dataset/Train' TEST_PATH = '../input/pepsico-lab-potato-quality-control/Pepsico RnD Potato Lab Dataset/Test' BATCH_SIZE = 32 COLOR_MODE = 'rgb' TARGET_SIZE = (255, 255) GRAY_SCALL = (3,) INPUT_SIZE = TARGET_SIZE + GRAY_SCALL EPOCHS = 10 CLASSES = ['Defective','Non-Defective'] # -- Data Normalization -- data_generator = ImageDataGenerator(samplewise_center=True, #making sure that each image has a mean of 0 samplewise_std_normalization=True, #and standard deviation 1 horizontal_flip=True, #Randomly flip inputs horizontally validation_split=0.3, ) # -- Data iterators -- train_data = data_generator.flow_from_directory(directory=TRAIN_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, subset='training', shuffle=True) validation_data = data_generator.flow_from_directory(directory=TRAIN_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, subset='validation', shuffle=True) test_data = data_generator.flow_from_directory(directory=TEST_PATH, target_size=TARGET_SIZE, batch_size=BATCH_SIZE, class_mode='categorical', color_mode=COLOR_MODE, shuffle=True) # -- plot random batch -- images, labels = train_data.next() classes = np.asarray(CLASSES) _, axs = plt.subplots(4, 8, figsize=(12,12)) axs = axs.flatten() for img, label, ax in zip(images, labels, axs): ax.imshow(img) ax.axis('off') label = label.astype(int) ax.set_title(classes[label == 1]) plt.show() def my_model(): vgg19_model = VGG19(weights='imagenet',include_top=False,input_shape=INPUT_SIZE) vgg19_model.trainable = False flatten =Flatten()(vgg19_model.layers[-1].output) fc1 = Dense(units=4096, activation ='relu')(flatten) dropout = Dropout(0.2)(fc1) fc2 = Dense(units=1024,activation='relu')(dropout) output = Dense(2, activation='softmax')(fc2) model = Model(inputs = vgg19_model.input, outputs=output) model.summary() return model model = my_model() tf.keras.utils.plot_model( model, to_file='model.png', show_shapes=True, show_dtype=False, show_layer_names=True, rankdir='T', expand_nested=False, dpi=96 ) # -- Define optimizer and loss -- opt = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) loss = tf.keras.losses.CategoricalCrossentropy() # -- Compile model -- model.compile(optimizer=opt, loss=loss, metrics=['accuracy']) # -- Callbacks -- checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='my_model.h5', monitor='accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', save_freq='epoch') earlystoping = tf.keras.callbacks.EarlyStopping(monitor='accuracy', min_delta=0, patience=5, #Number of epochs with no improvement after which training will be stopped. 
verbose=1, mode='auto') log_dir = './logs/fit/' + datetime.datetime.now().strftime('%m.%d.%Y--%H-%M-%S') tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1, write_graph=True, write_images=False, update_freq='epoch') # -- Train model -- history = model.fit(x=train_data, epochs=EPOCHS, steps_per_epoch=len(train_data), verbose=1, validation_data=validation_data, validation_steps=1) # -- Save model -- model.save('my_model.h5') def learning_curves(history): '''plot learning curves''' acc = history.history['accuracy'] val_acc = history.history['val_accuracy'] loss = history.history['loss'] val_loss = history.history['val_loss'] plt.figure(figsize=(10, 8)) plt.subplot(2, 1, 1) plt.plot(acc, label='Training Accuracy') plt.plot(val_acc, label='Validation Accuracy') plt.legend(loc='lower right') plt.ylabel('Accuracy') plt.title('Training and Validation Accuracy') plt.subplot(2, 1, 2) plt.plot(loss, label='Training Loss') plt.plot(val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.ylabel('Loss - Cross Entropy') plt.xlabel('epoch') plt.ylim([0,1.6]) plt.title('Training and Validation Loss') plt.show() # -- Plot learning curves -- learning_curves(history) # -- Evaluate the model on the test data -- loss, accuracy = model.evaluate(x=test_data) print("test loss: ", loss, ", test acc: " , 100*accuracy, "%") def defective_or_not(img_path): img = tf.keras.preprocessing.image.load_img(img_path, target_size=(255,255,3)) img = np.asarray(img) img = np.expand_dims(img, axis=0) model = tf.keras.models.load_model('my_model.h5') output = model.predict(img) print(classes[output[0]==1]) # defective_or_not(image.png)
0.709321
0.823044
``` """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies. 5. Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect """ # If you're using Google Colab and not running locally, run this cell. ## Install dependencies !pip install wget !apt-get install sox libsndfile1 ffmpeg !pip install unidecode !pip install matplotlib>=3.3.2 ## Install NeMo BRANCH = 'main' !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all] ## Grab the config we'll use in this example !mkdir configs !wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/config.yaml """ Remember to restart the runtime for the kernel to pick up any upgraded packages (e.g. matplotlib)! Alternatively, you can uncomment the exit() below to crash and restart the kernel, in the case that you want to use the "Run All Cells" (or similar) option. """ # exit() ``` # Streaming ASR In this tutorial, we will look at one way to use one of NeMo's pretrained Conformer-CTC models for streaming inference. We will first look at some use cases where we may need streaming inference and then we will work towards developing a method for transcribing a long audio file using streaming. # Why Stream? Streaming inference may be needed in one of the following scenarios: * Real-time or close to real-time inference for live transcriptions * Offline transcriptions of very long audio In this tutorial, we will mainly focus on streaming for handling long form audio and close to real-time inference with CTC based models. For training ASR models we usually use short segments of audio (<20s) that may be smaller chunks of a long audio that is aligned with the transcriptions and segmented into smaller chunks (see [tools/](https://github.com/NVIDIA/NeMo/tree/main/tools) for some great tools to do this). For running inference on long audio files we are restricted by the available GPU memory that dictates the maximum length of audio that can be transcribed in one inference call. We will take a look at one of the ways to overcome this restriction using NeMo's Conformer-CTC ASR model. # Conformer-CTC Conformer-CTC models distributed with NeMo use a combination of self-attention and convolution modules to achieve the best of the two approaches, the self-attention layers can learn the global interaction while the convolutions efficiently capture the local correlations. Use of self-attention layers comes with a cost of increased memory usage at a quadratic rate with the sequence length. That means that transcribing long audio files with Conformer-CTC models needs streaming inference to break up the audio into smaller chunks. We will develop one method to do such inference through the course of this tutorial. # Data To demonstrate transcribing a long audio file we will use utterances from the dev-clean set of the [mini Librispeech corpus](https://www.openslr.org/31/). 
``` # If something goes wrong during data processing, un-comment the following line to delete the cached dataset # !rm -rf datasets/mini-dev-clean !mkdir -p datasets/mini-dev-clean !python ../../scripts/dataset_processing/get_librispeech_data.py \ --data_root "datasets/mini-dev-clean/" \ --data_sets dev_clean_2 manifest = "datasets/mini-dev-clean/dev_clean_2.json" ``` Let's create a long audio that is about 15 minutes long by concatenating audio from dev-clean and also create the corresponding concatenated transcript. ``` import json def concat_audio(manifest_file, final_len=3600): concat_len = 0 final_transcript = "" with open("concat_file.txt", "w") as cat_f: while concat_len < final_len: with open(manifest_file, "r") as mfst_f: for l in mfst_f: row = json.loads(l.strip()) if concat_len >= final_len: break cat_f.write(f"file {row['audio_filepath']}\n") final_transcript += (" " + row['text']) concat_len += float(row['duration']) return concat_len, final_transcript new_duration, ref_transcript = concat_audio(manifest, 15*60) concat_audio_path = "datasets/mini-dev-clean/concatenated_audio.wav" !ffmpeg -t {new_duration} -safe 0 -f concat -i concat_file.txt -c copy -t {new_duration} {concat_audio_path} -y print("Finished concatenating audio file!") ``` # Streaming with CTC based models Now let's try to transcribe the long audio file created above using a conformer-large model. ``` import torch import nemo.collections.asr as nemo_asr import contextlib import gc device = 'cuda' if torch.cuda.is_available() else 'cpu' device ``` We are mainly concerned about decoding on the GPU in this tutorial. CPU decoding may be able to handle longer files but would also not be as fast as GPU decoding. Let's check if we can run transcribe() on the long audio file that we created above. ``` # Clear up memory torch.cuda.empty_cache() gc.collect() model = nemo_asr.models.EncDecCTCModelBPE.from_pretrained("stt_en_conformer_ctc_large", map_location=device) device = 'cuda' if torch.cuda.is_available() else 'cpu' # device = 'cpu' # You can transcribe even longer samples on the CPU, though it will take much longer ! model = model.to(device) # Helper for torch amp autocast if torch.cuda.is_available(): autocast = torch.cuda.amp.autocast else: @contextlib.contextmanager def autocast(): print("AMP was not available, using FP32!") yield ``` The call to transcribe() below should fail with a "CUDA out of memory" error when run on a GPU with 32 GB memory. ``` with autocast(): transcript = model.transcribe([concat_audio_path], batch_size=1)[0] # Clear up memory torch.cuda.empty_cache() gc.collect() ``` # Buffer mechanism for streaming long audio files One way to transcribe long audio with a Conformer-CTC model would be to split the audio into consecutive smaller chunks and running inference on each chunk. Care should be taken to have enough context for audio at either edges for accurate transcription. Let's introduce some terminology here to help us navigate the rest of this tutorial. * Buffer size is the length of audio on which inference is run * Chunk size is the length of new audio that is added to the buffer. An audio buffer is made up of a chunk of audio with some padded audio from previous chunk. In order to make the best predictions with enough context for the beginning and end portions of the buffer, we only collect tokens for the middle portion of the buffer of length equal to the size of each chunk. 
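As a quick optional sanity check, the duration of the concatenated file can be confirmed with `soundfile`, which is already a dependency of this tutorial; it should come out close to `new_duration`:

```
# Optional sanity check: the concatenated file should be roughly new_duration seconds long
import soundfile as sf

info = sf.info(concat_audio_path)
print(f"{info.duration:.1f} s at {info.samplerate} Hz (expected ~{new_duration:.1f} s)")
```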
Let's suppose that the maximum length of audio that can be transcribed with the conformer-large model is 20 s. We can then use 20 s as the buffer size and, for example, 15 s as the chunk size, so one hour of audio is broken into 240 chunks of 15 s each. Let's take a look at a few audio buffers that may be created for this audio.

```
# A simple iterator class to return successive chunks of samples
class AudioChunkIterator():
    def __init__(self, samples, frame_len, sample_rate):
        self._samples = samples
        # use the chunk length that was passed in rather than a global variable
        self._chunk_len = frame_len*sample_rate
        self._start = 0
        self.output=True

    def __iter__(self):
        return self

    def __next__(self):
        if not self.output:
            raise StopIteration
        last = int(self._start + self._chunk_len)
        if last <= len(self._samples):
            chunk = self._samples[self._start: last]
            self._start = last
        else:
            # pad the final, shorter chunk with zeros
            chunk = np.zeros([int(self._chunk_len)], dtype='float32')
            samp_len = len(self._samples) - self._start
            chunk[0:samp_len] = self._samples[self._start:len(self._samples)]
            self.output = False

        return chunk

# a helper function for extracting samples as a numpy array from the audio file
import soundfile as sf
import librosa  # needed for resampling when the file is not already at target_sr

def get_samples(audio_file, target_sr=16000):
    with sf.SoundFile(audio_file, 'r') as f:
        dtype = 'int16'
        sample_rate = f.samplerate
        samples = f.read(dtype=dtype)
        if sample_rate != target_sr:
            samples = librosa.core.resample(samples, sample_rate, target_sr)
        samples = samples.astype('float32')/32768
        samples = samples.transpose()
        return samples
```

Let's take a look at each chunk of speech that is used for decoding.

```
import matplotlib.pyplot as plt

samples = get_samples(concat_audio_path)
sample_rate = model.preprocessor._cfg['sample_rate']
chunk_len_in_secs = 1
chunk_reader = AudioChunkIterator(samples, chunk_len_in_secs, sample_rate)
count = 0
for chunk in chunk_reader:
    count += 1
    plt.plot(chunk)
    plt.show()
    if count >= 5:
        break
```

Now, let's plot the actual buffers at each stage after a new chunk is added to the buffer. The audio buffer can be thought of as a fixed-size queue, with each incoming chunk added at the end of the buffer and the oldest samples removed from the beginning.

```
import numpy as np

context_len_in_secs = 1
buffer_len_in_secs = chunk_len_in_secs + 2 * context_len_in_secs

buffer_len = sample_rate * buffer_len_in_secs
sampbuffer = np.zeros([buffer_len], dtype=np.float32)

chunk_reader = AudioChunkIterator(samples, chunk_len_in_secs, sample_rate)
chunk_len = sample_rate * chunk_len_in_secs
count = 0
for chunk in chunk_reader:
    count += 1
    sampbuffer[:-chunk_len] = sampbuffer[chunk_len:]
    sampbuffer[-chunk_len:] = chunk
    plt.plot(sampbuffer)
    plt.show()

    if count >= 5:
        break
```

Now that we have a method to split the long audio into smaller chunks, we can work on transcribing the individual buffers and merging the outputs to get the transcription of the whole audio. First, we implement some helper functions to load the buffers into the data layer.

```
from nemo.core.classes import IterableDataset

def speech_collate_fn(batch):
    """collate batch of audio sig, audio len
    Args:
        batch (FloatTensor, LongTensor):  A tuple of tuples of signal, signal lengths.
        This collate func assumes the signals are 1d torch tensors (i.e. mono audio).
""" _, audio_lengths = zip(*batch) max_audio_len = 0 has_audio = audio_lengths[0] is not None if has_audio: max_audio_len = max(audio_lengths).item() audio_signal= [] for sig, sig_len in batch: if has_audio: sig_len = sig_len.item() if sig_len < max_audio_len: pad = (0, max_audio_len - sig_len) sig = torch.nn.functional.pad(sig, pad) audio_signal.append(sig) if has_audio: audio_signal = torch.stack(audio_signal) audio_lengths = torch.stack(audio_lengths) else: audio_signal, audio_lengths = None, None return audio_signal, audio_lengths # simple data layer to pass audio signal class AudioBuffersDataLayer(IterableDataset): def __init__(self): super().__init__() def __iter__(self): return self def __next__(self): if self._buf_count == len(self.signal) : raise StopIteration self._buf_count +=1 return torch.as_tensor(self.signal[self._buf_count-1], dtype=torch.float32), \ torch.as_tensor(self.signal_shape[0], dtype=torch.int64) def set_signal(self, signals): self.signal = signals self.signal_shape = self.signal[0].shape self._buf_count = 0 def __len__(self): return 1 ``` Next we implement a class that implements transcribing audio buffers and merging the tokens corresponding to a chunk of audio within each buffer. For each buffer, we pick tokens corresponding to one chunk length of audio. The chunk within each buffer is chosen such that there is equal left and right context available to the audio within the chunk. For example, if the chunk size is 1s and buffer size is 3s, we collect tokens corresponding to audio starting from 1s to 2s within each buffer. Conformer-CTC models have a model stride of 4, i.e., a token is produced for every 4 feature vectors in the time domain. MelSpectrogram features are generated once every 10 ms, so a token is produced for every 40 ms of audio. **Note:** The inherent assumption here is that the output tokens from the model are well aligned with corresponding audio segments. This may not always be true for models trained with CTC loss, so this method of streaming inference may not always work with CTC based models. 
``` from torch.utils.data import DataLoader import math class ChunkBufferDecoder: def __init__(self,asr_model, stride, chunk_len_in_secs=1, buffer_len_in_secs=3): self.asr_model = asr_model self.asr_model.eval() self.data_layer = AudioBuffersDataLayer() self.data_loader = DataLoader(self.data_layer, batch_size=1, collate_fn=speech_collate_fn) self.buffers = [] self.all_preds = [] self.chunk_len = chunk_len_in_secs self.buffer_len = buffer_len_in_secs assert(chunk_len_in_secs<=buffer_len_in_secs) feature_stride = asr_model._cfg.preprocessor['window_stride'] self.model_stride_in_secs = feature_stride * stride self.n_tokens_per_chunk = math.ceil(self.chunk_len / self.model_stride_in_secs) self.blank_id = len(asr_model.decoder.vocabulary) self.plot=False @torch.no_grad() def transcribe_buffers(self, buffers, merge=True, plot=False): self.plot = plot self.buffers = buffers self.data_layer.set_signal(buffers[:]) self._get_batch_preds() return self.decode_final(merge) def _get_batch_preds(self): device = self.asr_model.device for batch in iter(self.data_loader): audio_signal, audio_signal_len = batch audio_signal, audio_signal_len = audio_signal.to(device), audio_signal_len.to(device) log_probs, encoded_len, predictions = self.asr_model(input_signal=audio_signal, input_signal_length=audio_signal_len) preds = torch.unbind(predictions) for pred in preds: self.all_preds.append(pred.cpu().numpy()) def decode_final(self, merge=True, extra=0): self.unmerged = [] self.toks_unmerged = [] # index for the first token corresponding to a chunk of audio would be len(decoded) - 1 - delay delay = math.ceil((self.chunk_len + (self.buffer_len - self.chunk_len) / 2) / self.model_stride_in_secs) decoded_frames = [] all_toks = [] for pred in self.all_preds: ids, toks = self._greedy_decoder(pred, self.asr_model.tokenizer) decoded_frames.append(ids) all_toks.append(toks) for decoded in decoded_frames: self.unmerged += decoded[len(decoded) - 1 - delay:len(decoded) - 1 - delay + self.n_tokens_per_chunk] if self.plot: for i, tok in enumerate(all_toks): plt.plot(self.buffers[i]) plt.show() print("\nGreedy labels collected from this buffer") print(tok[len(tok) - 1 - delay:len(tok) - 1 - delay + self.n_tokens_per_chunk]) self.toks_unmerged += tok[len(tok) - 1 - delay:len(tok) - 1 - delay + self.n_tokens_per_chunk] print("\nTokens collected from succesive buffers before CTC merge") print(self.toks_unmerged) if not merge: return self.unmerged return self.greedy_merge(self.unmerged) def _greedy_decoder(self, preds, tokenizer): s = [] ids = [] for i in range(preds.shape[0]): if preds[i] == self.blank_id: s.append("_") else: pred = preds[i] s.append(tokenizer.ids_to_tokens([pred.item()])[0]) ids.append(preds[i]) return ids, s def greedy_merge(self, preds): decoded_prediction = [] previous = self.blank_id for p in preds: if (p != previous or previous == self.blank_id) and p != self.blank_id: decoded_prediction.append(p.item()) previous = p hypothesis = self.asr_model.tokenizer.ids_to_text(decoded_prediction) return hypothesis ``` To see how this chunk based decoder comes together, let's call the decoder with a few buffers we create from our long audio file. Some interesting experiments to try would be to see how changing sizes of the chunk and the context affects transcription accuracy. 
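Before building the buffers, here is a small, purely illustrative calculation of the two quantities the decoder above keeps track of, using the 4 s chunk / 8 s buffer configuration we are about to try (in the class itself the 0.01 s window stride is read from the model config rather than hard-coded):

```
# Illustrative only: token bookkeeping for a 4 s chunk inside an 8 s buffer
import math

model_stride_in_secs = 0.01 * 4   # 10 ms feature stride * model stride of 4 -> one token per 40 ms
chunk_s, buffer_s = 4, 8

n_tokens_per_chunk = math.ceil(chunk_s / model_stride_in_secs)                  # 100 tokens kept per buffer
delay = math.ceil((chunk_s + (buffer_s - chunk_s) / 2) / model_stride_in_secs)  # 150 tokens back from the end
print(n_tokens_per_chunk, delay)
```

So from every decoded buffer we keep the ~100 tokens that start about 150 tokens before the end, which corresponds to the centered chunk of audio.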
``` chunk_len_in_secs = 4 context_len_in_secs = 2 buffer_len_in_secs = chunk_len_in_secs + 2* context_len_in_secs n_buffers = 5 buffer_len = sample_rate*buffer_len_in_secs sampbuffer = np.zeros([buffer_len], dtype=np.float32) chunk_reader = AudioChunkIterator(samples, chunk_len_in_secs, sample_rate) chunk_len = sample_rate*chunk_len_in_secs count = 0 buffer_list = [] for chunk in chunk_reader: count +=1 sampbuffer[:-chunk_len] = sampbuffer[chunk_len:] sampbuffer[-chunk_len:] = chunk buffer_list.append(np.array(sampbuffer)) if count >= n_buffers: break stride = 4 # 8 for Citrinet asr_decoder = ChunkBufferDecoder(model, stride=stride, chunk_len_in_secs=chunk_len_in_secs, buffer_len_in_secs=buffer_len_in_secs ) transcription = asr_decoder.transcribe_buffers(buffer_list, plot=True) # Final transcription after CTC merge print(transcription) ``` Time to evaluate our streaming inference on the whole long file that we created. ``` # WER calculation from nemo.collections.asr.metrics.wer import word_error_rate # Collect all buffers from the audio file sampbuffer = np.zeros([buffer_len], dtype=np.float32) chunk_reader = AudioChunkIterator(samples, chunk_len_in_secs, sample_rate) buffer_list = [] for chunk in chunk_reader: sampbuffer[:-chunk_len] = sampbuffer[chunk_len:] sampbuffer[-chunk_len:] = chunk buffer_list.append(np.array(sampbuffer)) asr_decoder = ChunkBufferDecoder(model, stride=stride, chunk_len_in_secs=chunk_len_in_secs, buffer_len_in_secs=buffer_len_in_secs ) transcription = asr_decoder.transcribe_buffers(buffer_list, plot=False) wer = word_error_rate(hypotheses=[transcription], references=[ref_transcript]) print(f"WER: {round(wer*100,2)}%") ```
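Following up on the suggestion above about experimenting with chunk and context sizes, one possible (and fairly slow) sketch of such a sweep is shown below; the settings are only examples, each one rebuilds its own buffers and decoder, and it assumes `AudioChunkIterator` uses the chunk length passed to it:

```
# Example sweep (slow): compare WER for a few chunk/context combinations
for chunk_s, context_s in [(2, 1), (4, 2), (6, 3)]:
    buf_s = chunk_s + 2 * context_s
    chunk_samples = sample_rate * chunk_s
    buf = np.zeros([sample_rate * buf_s], dtype=np.float32)
    bufs = []
    for chunk in AudioChunkIterator(samples, chunk_s, sample_rate):
        buf[:-chunk_samples] = buf[chunk_samples:]
        buf[-chunk_samples:] = chunk
        bufs.append(np.array(buf))
    decoder = ChunkBufferDecoder(model, stride=stride,
                                 chunk_len_in_secs=chunk_s, buffer_len_in_secs=buf_s)
    hyp = decoder.transcribe_buffers(bufs, plot=False)
    sweep_wer = word_error_rate(hypotheses=[hyp], references=[ref_transcript])
    print(f"chunk={chunk_s}s, context={context_s}s -> WER: {round(sweep_wer*100, 2)}%")
```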
# Importing Required Libraries ``` import os import cv2 import pandas as pd import tensorflow as tf import seaborn as sns from keras.utils import np_utils import tensorflow.keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Dense, Dropout from tensorflow.keras.optimizers import Adamax from tensorflow.keras.applications import VGG16 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg16 import preprocess_input from tensorflow.keras.layers import Input, Flatten, Dense from tensorflow.keras.models import Model ``` # Importing Dataset ``` raw_data_csv_file_name = 'data/fer2013.csv' raw_data = pd.read_csv(raw_data_csv_file_name) raw_data.info() raw_data.head() raw_data["Usage"].value_counts() ``` # Viewing images in Dataset ``` %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np def show_image_and_label(x, y): x_reshaped = x.reshape(48,48) plt.imshow(x_reshaped, cmap= "gray", interpolation="nearest") plt.axis("off") plt.show() print(y) # x_pixels img = raw_data["pixels"][0] val = img.split(" ") x_pixels = np.array(val, 'float32') x_pixels /= 255 np.shape(x_pixels) show_image_and_label(x_pixels, raw_data["emotion"][0]) ``` (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral) # Required Functions For Preprocessing of Dataset ``` TRAIN_END = 28708 TEST_START = TRAIN_END + 1 NUM_CLASSES = 7 IMG_SIZE = 48 def split_for_test(list): train = list[0:TRAIN_END] test = list[TEST_START:] return train, test def pandas_vector_to_list(pandas_df): py_list = [item[0] for item in pandas_df.values.tolist()] return py_list def process_emotion(emotion): """ Takes in a vector of emotions and outputs a list of emotions as one-hot vectors. :param emotion: vector of ints (0-7) :return: list of one-hot vectors (array of 7) """ emotion_as_list = pandas_vector_to_list(emotion) y_data = [] for index in range(len(emotion_as_list)): y_data.append(emotion_as_list[index]) # Y data y_data_categorical = np_utils.to_categorical(y_data, NUM_CLASSES) return y_data_categorical def process_pixels(pixels, img_size=48): """ Takes in a string (pixels) that has space separated ints. Will transform the ints to a 48x48 matrix of floats(/255). :param pixels: string with space separated ints :param img_size: image size :return: array of 48x48 matrices """ pixels_as_list = pandas_vector_to_list(pixels) np_image_array = [] for index, item in enumerate(pixels_as_list): # 48x48 data = np.zeros((img_size, img_size), dtype=np.uint8) # split space separated ints pixel_data = item.split() # 0 -> 47, loop through the rows for i in range(0, img_size): # (0 = 0), (1 = 47), (2 = 94), ... pixel_index = i * img_size # (0 = [0:47]), (1 = [47: 94]), (2 = [94, 141]), ... 
data[i] = pixel_data[pixel_index:pixel_index + img_size] np_image_array.append(np.array(data)) np_image_array = np.array(np_image_array) # convert to float and divide by 255 np_image_array = np_image_array.astype('float32') / 255.0 return np_image_array def duplicate_input_layer(array_input, size): vg_input = np.empty([size, 48, 48, 3]) for index, item in enumerate(vg_input): item[:, :, 0] = array_input[index] item[:, :, 1] = array_input[index] item[:, :, 2] = array_input[index] return vg_input ``` # Pre-Processing Dataset ``` # convert to one hot vectors emotion_array = process_emotion(raw_data[['emotion']]) # convert to a 48x48 float matrix pixel_array = process_pixels(raw_data[['pixels']]) ``` # Splitting Train-Test Data ``` # split for test/train y_train, y_test = split_for_test(emotion_array) x_train_matrix, x_test_matrix = split_for_test(pixel_array) n_train = int(len(x_train_matrix)) n_test = int(len(x_test_matrix)) x_train_input = duplicate_input_layer(x_train_matrix, n_train) x_test_input = duplicate_input_layer(x_test_matrix, n_test) ``` # Defining Model ``` #Get back the convolutional part of a VGG network trained on ImageNet model_vgg16_conv = VGG16(weights='imagenet', include_top=False,) model_vgg16_conv.summary() inp = Input(shape=(48, 48, 3),name = 'image_input') #Use the generated model output_vgg16_conv = model_vgg16_conv(inp) #Add the fully-connected layers x = Flatten(name='flatten')(output_vgg16_conv) x = Dense(4096, activation='relu', name='fc1')(x) x = Dense(4096, activation='relu', name='fc2')(x) x = Dense(7, activation='softmax', name='predictions')(x) #Create your own model my_model = Model(inp,x) #In the summary, weights and layers from VGG part will be hidden, but they will be fit during the training my_model.summary() np.shape(x_train_input) ``` ### Optimizer for Model and Compiling Model ``` sgd = tf.keras.optimizers.SGD(learning_rate=0.01) my_model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy']) ``` ### Creating Checkpoint Callback for Training ``` checkpoint_path = "current_training/checkpoint/cp-{epoch:04d}.ckpt" #checkpoint_path = "training_1/cp.ckpt" checkpoint_dir = os.path.dirname(checkpoint_path) # Create a callback that saves the model's weights cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1) ``` ### Training Model ``` my_model.fit(x_train_input, y_train, epochs=80, batch_size=32, validation_data=(x_test_input, y_test), callbacks=[cp_callback]) ``` # Saving Trained Model ``` # Save the entire model as a SavedModel. 
!mkdir -p saved_model my_model.save('saved_model/my_model') ``` ### Loading Pre-trained Model with Weights included ``` new_model = tf.keras.models.load_model('saved_model/my_model') # Check its architecture new_model.summary() # Restore the weights #new_model.load_weights('cp-0074.ckpt') ``` # Evaluating Accuracy on Training Data ``` # Evaluate the restored model loss, acc = new_model.evaluate(x_train_input, y_train, verbose=2) print('Restored model, accuracy: {:5.2f}%'.format(100*acc)) print(new_model.predict(x_train_input).shape) ``` # Evaluating Accuracy on Test Data ``` # Evaluate the restored model loss, acc = new_model.evaluate(x_test_input, y_test, verbose=2) print('Restored model, accuracy: {:5.2f}%'.format(100*acc)) print(new_model.predict(x_test_input).shape) ``` ## Predict from a single image #### EMOTION_DICT = {1:"ANGRY", 2:"DISGUST", 3:"FEAR", 4:"HAPPY", 5:"SAD", 6:"SURPRISE", 7:"NEUTRAL"} ``` import cv2 path = 'fear2.jpg' img = cv2.imread(path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) cv2.imwrite(path, gray) face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') img = cv2.imread(path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.3, 5) for (x,y,w,h) in faces: face_clip = img[y:y+h, x:x+w] cv2.imwrite(path, cv2.resize(face_clip, (48, 48))) read_image = cv2.imread(path) read_image = read_image.reshape(1, read_image.shape[0], read_image.shape[1], read_image.shape[2]) read_image_final = read_image/255.0 top_pred = new_model.predict(read_image_final) emotion_label = top_pred[0].argmax() + 1 emotion_label ```
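The value above is just the 1-based class index. A small helper, using the same dictionary shown in the heading above, prints the emotion name instead:

```
# Map the 1-based prediction index back to its emotion name (same mapping as in the heading above)
EMOTION_DICT = {1: "ANGRY", 2: "DISGUST", 3: "FEAR", 4: "HAPPY", 5: "SAD", 6: "SURPRISE", 7: "NEUTRAL"}
print(EMOTION_DICT[emotion_label])
```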
# NLTK Day 5 ``` import nltk from nltk.tokenize import word_tokenize from nltk import ne_chunk ``` # Topic: Chunking ## Example 1: ### 1. Write a sentence ``` NE_sent = "US president stays in the WHITE HOUSE" ``` ### 2. Word_tokenize the sentence: ``` NE_tokens = word_tokenize(NE_sent) NE_tokens ``` ### 3. Apply POS : Parts of Speech for the word_tokized sentence ``` NE_tags = nltk.pos_tag(NE_tokens) NE_tags ``` ### 4. Apply chunking: ``` NE_ner = ne_chunk(NE_tags) print(NE_ner) NE_ner ``` ### Notice the Tree in the above ouput: ### "Us president stays in the" , " WHITE HOUSE " # print(NE_ner) : if print() not mentioned, possibility of getting an error because some property(ies) of jupyter nb have to be changed. # Example 2: ``` NE_sent2 = "Donald Trump gives black men goosebumps" NE_tokens2 = word_tokenize(NE_sent2) NE_tokens2 NE_tags2 = nltk.pos_tag(NE_tokens2) NE_tags2 NE_ner2 = ne_chunk(NE_tags2) print(NE_ner2) ``` # Example 3: ``` NE_sent3 = "Rahul Gandhi lost his pocket money worth 100 million dollars while travelling in the Mumbai Local" NE_tokens3 = word_tokenize(NE_sent3) NE_tokens3 NE_tags3 = nltk.pos_tag(NE_tokens3) NE_tags3 NE_ner3 = ne_chunk(NE_tags3) print(NE_ner3) ``` # Chunking part 2 ``` new_sent = "The quick brown fox jumped over the lazy dog" new_tokens = nltk.pos_tag(word_tokenize(new_sent)) grammer_np = r"NP: {<DT>?<]]>*<NN>}" chunk_parser = nltk.RegexpParser(grammer_np) chunk_result = chunk_parser.parse(new_tokens) print(chunk_result) chunk_result #Check the output, ignore the error ``` ### Notice in the Tree: ### "the quick" , "brown" , "fox jumped over" , "the lazy" , "dog" # [CHATBOTS ] # [Elliot] ``` import nltk import warnings warnings.filterwarnings("ignore") import numpy as np import random import string #To process standard python srings from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity #nltk.download("punkt") if not available #nltk.download("wordnet") if not available import os os.getcwd() os.chdir("C:\\Users\\MAHE\\Desktop\\Data Science\\HYD\\Topic-Wise\\NLP") f = open("chatbot.txt","r",errors = "ignore") raw = f.read() raw = raw.lower() #Converts the whole text file in lowercase raw sent_tokens = nltk.sent_tokenize(raw) #Converts to list of sentences word_tokens = nltk.word_tokenize(raw) #Converts to list of words sent_tokens[:2] word_tokens[:5] lemmer = nltk.stem.WordNetLemmatizer() remove_punct_dict = dict((ord(i),None) for i in string.punctuation) def LemTokens(tokens): return [lemmer.lemmatize(i) for i in tokens] def LemNormalize(text): return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict))) def greeting(sentence): """ if user's input is a greeting, return a greeting response """ for i in sentence.split(): if i.lower() in GREETINGS_INPUTS: return random.choice(GREETINGS_RESPONSE) def response(user_response): elliot_response = "" sent_tokens.append(user_response) TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words="english") tfidf = TfidfVec.fit_transform(sent_tokens) vals = cosine_similarity(tfidf[-1],tfidf) idx = vals.argsort()[0][-2] flat = vals.flatten() flat.sort() req_tfidf = flat[-2] if(req_tfidf==0): elliot_response = elliot_response+" Bro kinda cringe but I can't understand you bro " return elliot_response else: elliot_response = elliot_response+sent_tokens[idx] return elliot_response flag = True print("ELLIOT: My Name is Elliot, you can call me El. 
If you want to exit, type 'bye'") while(flag==True): user_response = input() user_response = user_response.lower() if user_response!="bye": if(user_response=="thanks" or user_response=="thank you"): flag = False print("Elliot: Welcome bro") else: if(greeting(user_response)!=None): print("Elliot: "+greeting(user_response)) else: print("Elliot: ",end="") print(response(user_response)) sent_tokens.remove(user_response) else: flag = False print("Elliot: Bro bye and take care, see you later bro") GREETINGS_INPUTS = ["yo","sup","hi","sup","yo"] GREETINGS_RESPONSE = ["Ay yo my nigs!","Ay yo my homie!","Muh nigs ;_; been so long","bruh","You have officialy entered the bruh moment bruh"] ```
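The core of `response()` above is plain TF-IDF retrieval: the user's sentence is appended to the corpus, everything is vectorized, and the corpus sentence with the highest cosine similarity is returned. A minimal standalone sketch of that idea on a toy corpus (these sentences are made up and are not from `chatbot.txt`):

```
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

corpus = ["a chatbot is a software application used to conduct a conversation",
          "tf idf weighs words by how informative they are in a document",
          "cosine similarity compares two vectors by the angle between them"]
query = "how does tf idf work"

tfidf = TfidfVectorizer().fit_transform(corpus + [query])   # last row is the query
sims = cosine_similarity(tfidf[-1], tfidf[:-1])             # query vs. every corpus sentence
print(corpus[sims.argmax()])                                # the best match acts as the "response"
```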
# Generative Adversarial Network

In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!

GANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 by Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:

* [Pix2Pix](https://affinelayer.com/pixsrv/)
* [CycleGAN](https://github.com/junyanz/CycleGAN)
* [A whole list](https://github.com/wiseodd/generative-models)

The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator: it wants to output data that looks _as close as possible_ to real data. The discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.

![GAN diagram](assets/gan_diagram.png)

The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to construct its fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.

The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates a real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.

```
%matplotlib inline

import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
```

## Model Inputs

First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input `inputs_real` and the generator input `inputs_z`. We'll assign them the appropriate sizes for each of the networks.

>**Exercise:** Finish the `model_inputs` function below. Create the placeholders for `inputs_real` and `inputs_z` using the input sizes `real_dim` and `z_dim` respectively.

```
def model_inputs(real_dim, z_dim):
    inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='inputs_real')
    inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='inputs_z')

    return inputs_real, inputs_z
```

## Generator network

![GAN Network](assets/gan_network.png)

Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.

#### Variable Scope
Here we need to use `tf.variable_scope` for two reasons. Firstly, we're going to make sure all the variable names start with `generator`. Similarly, we'll prepend `discriminator` to the discriminator variables. This will help out later when we're training the separate networks. We could just use `tf.name_scope` to set the names, but we also want to reuse these networks with different inputs.
For the generator, we're going to train it, but also _sample from it_ as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the `reuse` keyword for `tf.variable_scope` to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again. To use `tf.variable_scope`, you use a `with` statement: ```python with tf.variable_scope('scope_name', reuse=False): # code here ``` Here's more from [the TensorFlow documentation](https://www.tensorflow.org/programmers_guide/variable_scope#the_problem) to get another look at using `tf.variable_scope`. #### Leaky ReLU TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can just take the outputs from a linear fully connected layer and pass them to `tf.maximum`. Typically, a parameter `alpha` sets the magnitude of the output for negative values. So, the output for negative input (`x`) values is `alpha*x`, and the output for positive `x` is `x`: $$ f(x) = max(\alpha * x, x) $$ #### Tanh Output The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1. >**Exercise:** Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the `reuse` keyword argument from the function to `tf.variable_scope`. ``` def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): ''' Build the generator network. Arguments --------- z : Input tensor for the generator out_dim : Shape of the generator output n_units : Number of units in hidden layer reuse : Reuse the variables with tf.variable_scope alpha : leak parameter for leaky ReLU Returns ------- out: ''' with tf.variable_scope('generator', reuse=reuse): # Hidden layer h1 = tf.layers.dense(z, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim, activation=None) out = tf.tanh(logits) return out ``` ## Discriminator The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer. >**Exercise:** Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the `reuse` keyword argument from the function arguments to `tf.variable_scope`. ``` def discriminator(x, n_units=128, reuse=False, alpha=0.01): ''' Build the discriminator network. 
Arguments --------- x : Input tensor for the discriminator n_units: Number of units in hidden layer reuse : Reuse the variables with tf.variable_scope alpha : leak parameter for leaky ReLU Returns ------- out, logits: ''' with tf.variable_scope('discriminator', reuse = reuse): # finish this # Hidden layer h1 = tf.layers.dense(x, n_units, activation=None) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1, activation=None) out = tf.sigmoid (logits) return out, logits ``` ## Hyperparameters ``` # Size of input image to discriminator input_size = 784 # 28x28 MNIST images flattened # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Label smoothing smooth = 0.1 ``` ## Build network Now we're building the network from the functions defined above. First is to get our inputs, `input_real, input_z` from `model_inputs` using the sizes of the input and z. Then, we'll create the generator, `generator(input_z, input_size)`. This builds the generator with the appropriate input and output sizes. Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as `g_model`. So the real data discriminator is `discriminator(input_real)` while the fake discriminator is `discriminator(g_model, reuse=True)`. >**Exercise:** Build the network from the functions you defined earlier. ``` tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Generator network here g_model = generator(input_z, input_size, n_units = g_hidden_size, alpha = alpha) # g_model is the generator output # Disriminator network here d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha) d_model_fake, d_logits_fake = discriminator(g_model, n_units = d_hidden_size, reuse = True, alpha = alpha) ``` ## Discriminator and Generator Losses Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_loss_real + d_loss_fake`. The losses will be sigmoid cross-entropies, which we can get with `tf.nn.sigmoid_cross_entropy_with_logits`. We'll also wrap that in `tf.reduce_mean` to get the mean for all the images in the batch. So the losses will look something like ```python tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) ``` For the real image logits, we'll use `d_logits_real` which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter `smooth`. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like `labels = tf.ones_like(tensor) * (1 - smooth)` The discriminator loss for the fake data is similar. The logits are `d_logits_fake`, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. 
Finally, the generator loss uses `d_logits_fake`, the fake image logits. But now the labels are all ones. The generator is trying to fool the discriminator, so it wants the discriminator to output ones for fake images.

>**Exercise:** Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.

```
# Calculate losses
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                            labels=tf.ones_like(d_logits_real) * (1 - smooth)))

d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                            labels=tf.zeros_like(d_logits_fake)))

d_loss = d_loss_real + d_loss_fake

g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                            labels=tf.ones_like(d_logits_fake)))
```

## Optimizers

We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use `tf.trainable_variables()`. This creates a list of all the variables we've defined in our graph.

For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with `generator`. So, we just need to iterate through the list from `tf.trainable_variables()` and keep variables that start with `generator`. Each variable object has an attribute `name` which holds the name of the variable as a string (`var.name == 'weights_0'` for instance).

We can do something similar with the discriminator. All the variables in the discriminator start with `discriminator`.

Then, in the optimizer we pass the variable lists to the `var_list` keyword argument of the `minimize` method. This tells the optimizer to only update the listed variables. Something like `tf.train.AdamOptimizer().minimize(loss, var_list=var_list)` will only train the variables in `var_list`.

>**Exercise:** Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using `AdamOptimizer`, create an optimizer for each network that updates only that network's variables.
``` # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = g_vars = d_vars = d_train_opt = g_train_opt = ``` ## Training ``` batch_size = 100 epochs = 100 samples = [] losses = [] saver = tf.train.Saver(var_list = g_vars) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images, reshape and rescale to pass to D batch_images = batch[0].reshape((batch_size, 784)) batch_images = batch_images*2 - 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size)) # Run optimizers _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z}) _ = sess.run(g_train_opt, feed_dict={input_z: batch_z}) # At the end of each epoch, get the losses and print them out train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images}) train_loss_g = g_loss.eval({input_z: batch_z}) print("Epoch {}/{}...".format(e+1, epochs), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # Save losses to view after training losses.append((train_loss_d, train_loss_g)) # Sample from generator as we're training for viewing afterwards sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha), feed_dict={input_z: sample_z}) samples.append(gen_samples) saver.save(sess, './checkpoints/generator.ckpt') # Save training generator samples with open('train_samples.pkl', 'wb') as f: pkl.dump(samples, f) ``` ## Training loss Here we'll check out the training losses for the generator and discriminator. ``` %matplotlib inline import matplotlib.pyplot as plt fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator') plt.plot(losses.T[1], label='Generator') plt.title("Training Losses") plt.legend() ``` ## Generator samples from training Here we can view samples of images from the generator. First we'll look at images taken while training. ``` def view_samples(epoch, samples): fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) im = ax.imshow(img.reshape((28,28)), cmap='Greys_r') return fig, axes # Load samples from generator taken while training with open('train_samples.pkl', 'rb') as f: samples = pkl.load(f) ``` These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make. ``` _ = view_samples(-1, samples) ``` Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion! ``` rows, cols = 10, 6 fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True) for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes): for img, ax in zip(sample[::int(len(sample)/cols)], ax_row): ax.imshow(img.reshape((28,28)), cmap='Greys_r') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) ``` It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3. 
## Sampling from the generator We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples! ``` saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha), feed_dict={input_z: sample_z}) view_samples(0, [gen_samples]) ```
``` # Covid19 Analysis # libraries import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn.model_selection import GridSearchCV from sklearn.metrics import roc_curve from sklearn.metrics import auc from sklearn.metrics import accuracy_score import pickle from sklearn.metrics import r2_score from sklearn.ensemble import RandomForestClassifier from sklearn.neural_network import MLPClassifier from sklearn.metrics import average_precision_score from sklearn.metrics import precision_recall_curve from xgboost import XGBRegressor from sklearn.linear_model import LogisticRegression from datetime import datetime from sklearn.ensemble import RandomForestRegressor from xgboost import XGBClassifier # reading dataset # https://opendatasus.saude.gov.br/dataset/bd-srag-2020 df = pd.read_csv('/home/pedro/bkp/code/dataset/INFLUD-21-09-2020.csv',sep=';',encoding = "ISO-8859-1") # Inputing constraint in the dataset # Positive case: df = df[df['PCR_SARS2']==1] print(df.shape) # Hospitalized people: df = df[df['PCR_SARS2']==1][df['HOSPITAL']==1][df['NU_IDADE_N']<=110] print(df.shape) # Hospitalized people with age small than 110: df = df[df['PCR_SARS2']==1][df['HOSPITAL']==1][df['NU_IDADE_N']<=110][df['EVOLUCAO'] != 3][df['EVOLUCAO'] != 9][df['EVOLUCAO'].notnull()] print(df.shape) #print(df[df['PCR_SARS2']==1][df['HOSPITAL']==1][df['NU_IDADE_N']<=110][df['EVOLUCAO'] != 9][df['EVOLUCAO'].notnull()].value_counts()) print(type(df)) # IDHM # Reading IBGE code for each municipalities and separating it for IDHM index df_atlas = pd.read_excel (r'/home/pedro/bkp/code/dataset/AtlasBrasil_Consulta.xlsx') # removind last interger in 'code' variable df_atlas['code'] = df_atlas['code'].astype(str).str[:-1].astype(np.int64) # Divinding IDHM in bins IDHM_veryhigh = set(df_atlas['code'][df_atlas['IDHM2010']>=0.800]) print(len(IDHM_veryhigh)) IDHM_high = set(df_atlas['code'][((df_atlas['IDHM2010']>=0.700)&(df_atlas['IDHM2010']<0.800))]) print(len(IDHM_high)) IDHM_medium = set(df_atlas['code'][((df_atlas['IDHM2010']>=0.600)&(df_atlas['IDHM2010']<0.700))]) print(len(IDHM_medium)) IDHM_low = set(df_atlas['code'][((df_atlas['IDHM2010']>=0.500)&(df_atlas['IDHM2010']<0.600))]) print(len(IDHM_low)) IDHM_verylow = set(df_atlas['code'][df_atlas['IDHM2010']<0.500]) print(len(IDHM_verylow)) df.loc[df['CO_MUN_NOT'].isin(IDHM_veryhigh) == True, 'IDHM'] = 5 df.loc[df['CO_MUN_NOT'].isin(IDHM_high) == True, 'IDHM'] = 4 df.loc[df['CO_MUN_NOT'].isin(IDHM_medium) == True, 'IDHM'] = 3 df.loc[df['CO_MUN_NOT'].isin(IDHM_low) == True, 'IDHM'] = 2 df.loc[df['CO_MUN_NOT'].isin(IDHM_verylow) == True, 'IDHM'] = 1 # Municipalities number analysed #print(df['IDHM'].isnull().sum()) # Private and public hospital separation df_hospital = pd.read_csv('/home/pedro/bkp/code/dataset/CNES_SUS.txt', sep='\t') public = set(df_hospital.iloc[:,0][df_hospital.iloc[:,3]=='S']) private = set(df_hospital.iloc[:,0][df_hospital.iloc[:,3]=='N']) df.loc[df['CO_UNI_NOT'].isin(public) == True, 'HEALTH_SYSTEM'] = 1 df.loc[df['CO_UNI_NOT'].isin(private) == True, 'HEALTH_SYSTEM'] = 0 # Constraint on dataset: We only analyze people with evolution, IDHM and Health system known df = df[df['IDHM'].notnull()][(df['HEALTH_SYSTEM']==1)|(df['HEALTH_SYSTEM']==0)] print(df.shape) # Selecting features df = df[['NU_IDADE_N','HEALTH_SYSTEM','SUPORT_VEN','EVOLUCAO']] # boxing the age feature bins = [0, 40, 50, 60, 70, 150] names = ['<40', '40-50', '50-60','60-70', '70>'] df['NU_IDADE_N'] = pd.cut(df['NU_IDADE_N'], bins=bins, labels=names, right= 
False) df['NU_IDADE_N'] = df['NU_IDADE_N'].map({'<40': 40, '40-50':50, '50-60': 60 , '60-70':70, '70>':150}) print(df.shape) df_missing = df.isnull().sum() print(df_missing) # Public and private hospital public_hosp = df['NU_IDADE_N'][df['HEALTH_SYSTEM'] == 1].shape[0] # public private_hosp = df['NU_IDADE_N'][df['HEALTH_SYSTEM'] == 0].shape[0] # private # Plotting figure y_pos = np.arange(len(names)) age= df['NU_IDADE_N'] # Create horizontal bars plt.figure(figsize=(10,5)) # Health System death people a = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 1) & (df['EVOLUCAO'] == 1))].value_counts().sort_index(ascending=True) b = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 1) & (df['EVOLUCAO'] == 2))].value_counts().sort_index(ascending=True) d = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 0) & (df['EVOLUCAO'] == 2))].value_counts().sort_index(ascending=True) e = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 0) & (df['EVOLUCAO'] == 1))].value_counts().sort_index(ascending=True) grupos = 5 indice = np.arange(grupos) bar_larg = 0.4 transp = 0.7 plt.bar(indice + 0.2, b.iloc[:]/public_hosp, bar_larg, alpha=0.7, color='blue', label='Public') plt.bar(indice - 0.2, d.iloc[:]/private_hosp, bar_larg, alpha=0.7, color='red', label='Private') plt.xticks(np.arange(5),['<40','40-49','50-59','60-69','$\geq$ 70'],fontsize=15) plt.yticks(fontsize=15) plt.legend(loc='upper left',fontsize=15) plt.ylabel('Prevalence',fontsize=17) plt.xlabel('Age',fontsize=17) plt.ylim([0, 0.23]) plt.tight_layout() plt.savefig('private_public_new.pdf') plt.show() # Plotting figure y_pos = np.arange(len(names)) age= df['NU_IDADE_N'] # Create horizontal bars plt.figure(figsize=(10,5)) # Health System death people a = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 1) & (df['EVOLUCAO'] == 1))].value_counts().sort_index(ascending=True) b = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 1) & (df['EVOLUCAO'] == 2))].value_counts().sort_index(ascending=True) d = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 0) & (df['EVOLUCAO'] == 2))].value_counts().sort_index(ascending=True) e = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 0) & (df['EVOLUCAO'] == 1))].value_counts().sort_index(ascending=True) grupos = 5 indice = np.arange(grupos) bar_larg = 0.8 transp = 0.7 plt.barh(indice, b.iloc[:]/public_hosp, bar_larg, alpha=0.5, color='blue', label='Public') plt.barh(indice, -d.iloc[:]/private_hosp, bar_larg, alpha=0.7, color='red', label='Private') plt.axvline(x=0,color='k',linestyle='--',linewidth=1) plt.yticks(np.arange(5),['<40','40-49','50-59','60-69','$\geq$ 70'],fontsize=15) plt.xticks(fontsize=15) plt.legend(fontsize=15) #plt.title('Death',fontsize=17) plt.xlabel('Prevalence',fontsize=17) plt.ylabel('Age',fontsize=17) plt.xlim([-0.205, 0.205]) plt.tight_layout() plt.savefig('private_public_02.pdf') plt.show() # Quick test # Death people for public and private print(b.sum()) # public print(d.sum()) # private df_check = df['NU_IDADE_N'][((df['HEALTH_SYSTEM'] == 1) & (df['EVOLUCAO'] == 2))].isnull().sum() print(df_check) ```
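As a small sanity check on the figures above (added here as a sketch; it is not part of the original analysis), the public/private split can also be summarised with a single `groupby`, using `EVOLUCAO == 2` as the death code exactly as in the plots:

```
# Added cross-check (sketch): in-hospital death fraction by health system
# (1 = public, 0 = private), overall and per age bin. These are conditional
# death fractions, which complement (but are not identical to) the prevalence
# bars plotted above.
death_by_system = df.groupby('HEALTH_SYSTEM')['EVOLUCAO'].apply(lambda s: (s == 2).mean())
print(death_by_system)

death_by_system_age = df.groupby(['HEALTH_SYSTEM', 'NU_IDADE_N'])['EVOLUCAO'].apply(lambda s: (s == 2).mean())
print(death_by_system_age)
```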
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ksachdeva/rethinking-tensorflow-probability/blob/master/notebooks/15_missing_data_and_other_opportunities.ipynb) # Chapter 15 - Missing Data and Other Opportunities ## Imports and utility functions ``` # Install packages that are not installed in colab try: import google.colab IN_COLAB = True except: IN_COLAB = False if IN_COLAB: %tensorflow_version 2.X !pip install watermark !pip install arviz USE_NIGHTLY_TFP = True # @param if IN_COLAB and USE_NIGHTLY_TFP: !pip install --upgrade tf-nightly !pip install --upgrade tfp-nightly %load_ext watermark # Core import numpy as np import arviz as az import pandas as pd import xarray as xr import tensorflow as tf import tensorflow_probability as tfp # visualization import matplotlib.pyplot as plt # aliases tfd = tfp.distributions tfb = tfp.bijectors Root = tfd.JointDistributionCoroutine.Root %watermark -p numpy,tensorflow,tensorflow_probability,arviz,scipy,pandas # config of various plotting libraries %config InlineBackend.figure_format = 'retina' az.style.use('arviz-darkgrid') if not USE_NIGHTLY_TFP: assert tf.__version__ >= '2.1.0', "Tensorflow version should be at minimum 2.1.0" assert tfp.__version__ >= '0.9.0', "TFP version should be at minimum 0.9.0" ``` ## Tensorflow MCMC Sampling helpers ``` USE_XLA = False NUMBER_OF_CHAINS = 2 NUMBER_OF_BURNIN = 500 NUMBER_OF_SAMPLES = 500 NUMBER_OF_LEAPFROG_STEPS = 4 def _trace_to_arviz(trace=None, sample_stats=None, observed_data=None, prior_predictive=None, posterior_predictive=None, inplace=True): if trace is not None and isinstance(trace, dict): trace = {k: np.swapaxes(v.numpy(), 1, 0) for k, v in trace.items()} if sample_stats is not None and isinstance(sample_stats, dict): sample_stats = {k: v.numpy().T for k, v in sample_stats.items()} if prior_predictive is not None and isinstance(prior_predictive, dict): prior_predictive = {k: v[np.newaxis] for k, v in prior_predictive.items()} if posterior_predictive is not None and isinstance(posterior_predictive, dict): if isinstance(trace, az.InferenceData) and inplace == True: return trace + az.from_dict(posterior_predictive=posterior_predictive) else: trace = None return az.from_dict( posterior=trace, sample_stats=sample_stats, prior_predictive=prior_predictive, posterior_predictive=posterior_predictive, observed_data=observed_data, ) @tf.function(autograph=False, experimental_compile=USE_XLA) def run_chain(init_state, bijectors, step_size, target_log_prob_fn, num_leapfrog_steps=NUMBER_OF_LEAPFROG_STEPS, num_samples=NUMBER_OF_SAMPLES, burnin=NUMBER_OF_BURNIN, ): def _trace_fn_transitioned(_, pkr): return ( pkr.inner_results.inner_results.log_accept_ratio ) hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn, num_leapfrog_steps=num_leapfrog_steps, step_size=step_size) inner_kernel = tfp.mcmc.TransformedTransitionKernel( inner_kernel=hmc_kernel, bijector=bijectors) kernel = tfp.mcmc.SimpleStepSizeAdaptation( inner_kernel=inner_kernel, target_accept_prob=.8, num_adaptation_steps=int(0.8*burnin), log_accept_prob_getter_fn=lambda pkr: pkr.inner_results.log_accept_ratio ) results, sampler_stat = tfp.mcmc.sample_chain( num_results=num_samples, num_burnin_steps=burnin, current_state=init_state, kernel=kernel, trace_fn=_trace_fn_transitioned) return results, sampler_stat def sample_posterior(jdc, observed_data, params, num_chains=NUMBER_OF_CHAINS, init_state=None, bijectors=None, num_samples=NUMBER_OF_SAMPLES, burnin=NUMBER_OF_BURNIN): if 
init_state is None: init_state = list(jdc.sample(NUMBER_OF_CHAINS)[:-1]) if bijectors is None: bijectors = [tfb.Identity() for i in init_state] target_log_prob_fn = lambda *x: jdc.log_prob(x + observed_data) step_size = 0.1 results, sample_stats = run_chain(init_state, bijectors, step_size=step_size, target_log_prob_fn=target_log_prob_fn, num_samples=num_samples, burnin=burnin) stat_names = ['mean_tree_accept'] sampler_stats = dict(zip(stat_names, [sample_stats])) posterior = dict(zip(params, results)) return _trace_to_arviz(trace=posterior, sample_stats=sampler_stats) ``` ## Dataset URLs ``` # You could change base url to local dir or a remoate raw github content _BASE_URL = "https://raw.githubusercontent.com/ksachdeva/rethinking-tensorflow-probability/master/data" WAFFLE_DIVORCE_DATASET_PATH = f"{_BASE_URL}/WaffleDivorce.csv" ``` ## Code 15.1 ``` # simulate a pancake and return randomly ordered sides def sim_pancake(): pancake = tfd.Categorical(logits=np.ones(3)).sample().numpy() sides = np.array([1, 1, 1, 0, 0, 0]).reshape(3, 2).T[:, pancake] np.random.shuffle(sides) return sides # sim 10,000 pancakes pancakes = [] for i in range(10_000): pancakes.append(sim_pancake()) pancakes = np.array(pancakes).T up = pancakes[0] down = pancakes[1] # compute proportion 1/1 (BB) out of all 1/1 and 1/0 num_11_10 = np.sum(up == 1) num_11 = np.sum((up == 1) & (down == 1)) num_11 / num_11_10 ``` ## Code 15.2 In the waffle dataset, both divorce rate and marriage rate variables are measured with substantial error and that error is reported in the form of standard errors. Also error varies across the states. Below we are plotting the measurement errors ``` d = pd.read_csv(WAFFLE_DIVORCE_DATASET_PATH, sep=";") # points ax = az.plot_pair(d[["MedianAgeMarriage", "Divorce"]].to_dict(orient="list"), plot_kwargs=dict(ms=15, mfc="none")) ax.set(ylim=(4, 15), xlabel="Median age marriage", ylabel="Divorce rate") # standard errors for i in range(d.shape[0]): ci = d.Divorce[i] + np.array([-1, 1]) * d["Divorce SE"][i] x = d.MedianAgeMarriage[i] plt.plot([x, x], ci, "k") ``` In the above plot, the lenght of the vertical lines show how uncertain the observed divorce rate is. 
## Code 15.3 ``` dat = dict( D_obs=tf.cast(d.Divorce.pipe(lambda x: (x - x.mean()) / x.std()).values, dtype=tf.float32), D_sd=tf.cast(d["Divorce SE"].values / d.Divorce.std(), dtype=tf.float32), M=d.Marriage.pipe(lambda x: (x - x.mean()) / x.std()).values, A=d.MedianAgeMarriage.pipe(lambda x: (x - x.mean()) / x.std()).values, N=d.shape[0]) def model_15_1(A, M, D_sd, N): def _generator(): alpha = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.2, name="alpha"), sample_shape=1)) betaA = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaA"), sample_shape=1)) betaM = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaM"), sample_shape=1)) sigma = yield Root(tfd.Sample(tfd.Exponential(rate=1., name="sigma"), sample_shape=1)) mu = alpha[...,tf.newaxis] + betaA[...,tf.newaxis] * A + betaM[...,tf.newaxis] * M scale = sigma[...,tf.newaxis] D_true = yield tfd.Independent(tfd.Normal(loc=mu, scale=scale), reinterpreted_batch_ndims=1) D_obs = yield tfd.Independent(tfd.Normal(loc=D_true, scale=D_sd), reinterpreted_batch_ndims=1) return tfd.JointDistributionCoroutine(_generator, validate_args=False) jdc_15_1 = model_15_1(dat["A"], dat["M"], dat["D_sd"], dat["N"]) NUM_CHAINS_FOR_15_1 = 2 init_state = [ tf.zeros([NUM_CHAINS_FOR_15_1]), tf.zeros([NUM_CHAINS_FOR_15_1]), tf.zeros([NUM_CHAINS_FOR_15_1]), tf.ones([NUM_CHAINS_FOR_15_1]), tf.zeros([NUM_CHAINS_FOR_15_1, dat["N"]]), ] bijectors = [ tfb.Identity(), tfb.Identity(), tfb.Identity(), tfb.Exp(), tfb.Identity() ] trace_15_1 = sample_posterior(jdc_15_1, observed_data=(dat["D_obs"],), params=['alpha', 'betaA', 'betaM', 'sigma', 'D_true'], init_state=init_state, bijectors=bijectors) ``` ## Code 15.4 ``` az.summary(trace_15_1, round_to=2, kind='all', credible_interval=0.89) ``` ## Code 15.5 What happens when there is a measurement error on predictor variables as well ? 
``` dat = dict( D_obs=tf.cast(d.Divorce.pipe(lambda x: (x - x.mean()) / x.std()).values, dtype=tf.float32), D_sd=tf.cast(d["Divorce SE"].values / d.Divorce.std(), dtype=tf.float32), M_obs=tf.cast(d.Marriage.pipe(lambda x: (x - x.mean()) / x.std()).values, dtype=tf.float32), M_sd=tf.cast(d["Marriage SE"].values / d.Marriage.std(), dtype=tf.float32), A=d.MedianAgeMarriage.pipe(lambda x: (x - x.mean()) / x.std()).values, N=d.shape[0]) def model_15_2(A, M_sd, D_sd, N): def _generator(): alpha = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.2, name="alpha"), sample_shape=1)) betaA = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaA"), sample_shape=1)) betaM = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaM"), sample_shape=1)) sigma = yield Root(tfd.Sample(tfd.Exponential(rate=1., name="sigma"), sample_shape=1)) M_true = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=1., name="M_true"), sample_shape=N)) mu = alpha[...,tf.newaxis] + betaA[...,tf.newaxis] * A + betaM[...,tf.newaxis] * M_true scale = sigma[...,tf.newaxis] D_true = yield tfd.Independent(tfd.Normal(loc=mu, scale=scale), reinterpreted_batch_ndims=1) D_obs = yield tfd.Independent(tfd.Normal(loc=D_true, scale=D_sd), reinterpreted_batch_ndims=1) M_obs = yield tfd.Independent(tfd.Normal(loc=M_true, scale=M_sd, name="M_obs"), reinterpreted_batch_ndims=1) return tfd.JointDistributionCoroutine(_generator, validate_args=False) jdc_15_2 = model_15_2(dat["A"], dat["M_sd"], dat["D_sd"], dat["N"]) NUM_CHAINS_FOR_15_2 = 2 init_state = [ tf.zeros([NUM_CHAINS_FOR_15_2]), tf.zeros([NUM_CHAINS_FOR_15_2]), tf.zeros([NUM_CHAINS_FOR_15_2]), tf.ones([NUM_CHAINS_FOR_15_2]), tf.zeros([NUM_CHAINS_FOR_15_2, dat["N"]]), # M_True tf.zeros([NUM_CHAINS_FOR_15_2, dat["N"]]), # D_True ] bijectors = [ tfb.Identity(), tfb.Identity(), tfb.Identity(), tfb.Exp(), tfb.Identity(), tfb.Identity() ] trace_15_2 = sample_posterior(jdc_15_2, observed_data=(dat["D_obs"], dat["M_obs"]), params=['alpha', 'betaA', 'betaM', 'sigma', 'M_true', 'D_true'], init_state=init_state, bijectors=bijectors) ``` ## Code 15.6 ``` post_D_true = trace_15_2.posterior["D_true"].values[0] post_M_true = trace_15_2.posterior["M_true"].values[0] D_est = np.mean(post_D_true, 0) M_est = np.mean(post_M_true, 0) plt.plot(dat["M_obs"], dat["D_obs"], "bo", alpha=0.5) plt.gca().set(xlabel="marriage rate (std)", ylabel="divorce rate (std)") plt.plot(M_est, D_est, "ko", mfc="none") for i in range(d.shape[0]): plt.plot([dat["M_obs"][i], M_est[i]], [dat["D_obs"][i], D_est[i]], "k-", lw=1) ``` Above figure demonstrates shrinkage of both divorce rate and marriage rate. Solid points are the observed values. Open points are posterior means. Lines connect pairs of points for the same state. Both variables are shrunk towards the inferred regression relationship. With measurement error, the insight is to realize that any uncertain piece of data can be replaced by a distribution that reflects uncertainty. 
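A minimal, closed-form illustration of that idea (added here as a sketch; it is not part of the book's code): when a standardized observation comes with a known measurement SE and the true value gets a standard-normal prior, the posterior for the true value is pulled toward the prior mean, and more strongly the larger the SE — which is exactly the shrinkage visible in the figure above.

```
# Normal prior + Normal measurement likelihood have a closed-form posterior;
# this only illustrates the shrinkage mechanism, not model 15.2 itself.
import numpy as np

def shrunk_posterior(obs, obs_sd, prior_mean=0.0, prior_sd=1.0):
    prior_prec = 1.0 / prior_sd ** 2
    obs_prec = 1.0 / obs_sd ** 2
    post_var = 1.0 / (prior_prec + obs_prec)
    post_mean = post_var * (prior_prec * prior_mean + obs_prec * obs)
    return post_mean, np.sqrt(post_var)

for se in [0.1, 0.5, 1.5]:
    mean, sd = shrunk_posterior(obs=1.0, obs_sd=se)
    print(f"SE = {se:3.1f} -> posterior mean {mean:5.2f}, posterior sd {sd:4.2f}")
```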
## Code 15.7 ``` # Simulated toy data N = 500 A = tfd.Normal(loc=0., scale=1.0).sample((N,)) M = tfd.Normal(loc=-A, scale=1.0).sample() D = tfd.Normal(loc=A, scale=1.0).sample() A_obs = tfd.Normal(loc=A, scale=1.).sample() ``` ## Code 15.8 ``` N = 100 S = tfd.Normal(loc=0., scale=1.).sample((N,)) H = tfd.Binomial(total_count=10, probs=tf.sigmoid(S)).sample() ``` ## Code 15.9 Hm = Homework missing Dog's decision to eat a piece of homework or not is not influenced by any relevant variable ``` D = tfd.Bernoulli(0.5).sample().numpy() # dogs completely random Hm = np.where(D == 1, np.nan, H) Hm ``` Since missing values are random, missignness does not necessiarily change the overall distribution of homework score. ## Code 15.10 Here studying influences whether a dog eats homework S->D Students who study a lot do not play with their Dogs and then dogs take revenge by eating homework ``` D = np.where(S > 0, 1, 0) Hm = np.where(D == 1, np.nan, H) Hm ``` Now every student who studies more than average (0) is missing homework ## Code 15.11 The case of noisy home and its influence on homework & Dog's behavior ``` # TODO - use seed; have not been able to make it work with tfp N = 1000 X = tfd.Sample(tfd.Normal(loc=0., scale=1.), sample_shape=(N,)).sample().numpy() S = tfd.Sample(tfd.Normal(loc=0., scale=1.), sample_shape=(N,)).sample().numpy() logits = 2 + S - 2 * X H = tfd.Binomial(total_count=10, logits=logits).sample().numpy() D = np.where(X > 1, 1, 0) Hm = np.where(D == 1, np.nan, H) ``` ## Code 15.12 ``` dat = dict(H=H, S=S) def model_15_3(S): def _generator(): alpha = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=1., name="alpha"), sample_shape=1)) betaS = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaS"), sample_shape=1)) logits = tf.squeeze(alpha[...,tf.newaxis] + betaS[...,tf.newaxis] * S) H = yield tfd.Independent(tfd.Binomial(total_count=10, logits=logits), reinterpreted_batch_ndims=1) return tfd.JointDistributionCoroutine(_generator, validate_args=False) jdc_15_3 = model_15_3(dat["S"]) NUM_CHAINS_FOR_15_3 = 4 alpha_init, betaS_init, _ = jdc_15_3.sample() init_state = [ tf.tile(alpha_init, (NUM_CHAINS_FOR_15_3,)), tf.tile(betaS_init, (NUM_CHAINS_FOR_15_3,)) ] bijectors = [ tfb.Identity(), tfb.Identity(), ] trace_15_3 = sample_posterior(jdc_15_3, observed_data=(dat["H"],), params=['alpha', 'betaS'], init_state=init_state, bijectors=bijectors) az.summary(trace_15_3, round_to=2, kind='all', credible_interval=0.89) ``` The true coefficient on S should be 1.00. 
We don’t expect to get that exactly, but the estimate above is way off ## Code 15.13 We build the model with missing data now ``` dat = dict(H=H[D==0], S=S[D==0]) def model_15_4(S): def _generator(): alpha = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=1., name="alpha"), sample_shape=1)) betaS = yield Root(tfd.Sample(tfd.Normal(loc=0., scale=0.5, name="betaS"), sample_shape=1)) logits = tf.squeeze(alpha[...,tf.newaxis] + betaS[...,tf.newaxis] * S) H = yield tfd.Independent(tfd.Binomial(total_count=10, logits=logits), reinterpreted_batch_ndims=1) return tfd.JointDistributionCoroutine(_generator, validate_args=False) jdc_15_4 = model_15_4(dat["S"]) NUM_CHAINS_FOR_15_4 = 2 alpha_init, betaS_init, _ = jdc_15_4.sample() init_state = [ tf.tile(alpha_init, (NUM_CHAINS_FOR_15_4,)), tf.tile(betaS_init, (NUM_CHAINS_FOR_15_4,)) ] bijectors = [ tfb.Identity(), tfb.Identity(), ] trace_15_4 = sample_posterior(jdc_15_4, observed_data=(dat["H"],), params=['alpha', 'betaS'], init_state=init_state, bijectors=bijectors) az.summary(trace_15_4, round_to=2, kind='all', credible_interval=0.89) ``` ## Code 15.14 ``` D = np.where(np.abs(X) < 1, 1, 0) ``` ## Code 15.15 ``` N = 100 S = tfd.Normal(loc=0., scale=1.).sample((N,)) H = tfd.Binomial(total_count=10, logits=S).sample().numpy() D = np.where(H < 5, 1, 0) Hm = np.where(D == 1, np.nan, H) Hm ```
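A quick added check (a sketch, not from the book) of why this last mechanism is the troublesome one: the homework score itself decides whether it goes missing, so the scores that survive systematically overstate the true average.

```
# H, D and Hm come from the cell above (Code 15.15).
print("mean of all H      :", H.mean())
print("mean of observed H :", np.nanmean(Hm))   # biased upward, since low scores are missing
print("fraction missing   :", D.mean())
```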
<a href="https://colab.research.google.com/github/andresni/GameTheoryStuff/blob/master/TEKoblig1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# This is a notebook for Oblig 1 in TEK9010

Before we start this fun exercise, we need to import some modules for later use.

Note: sorry for the long-winded explanations and "proofs". This was a very fun exercise. Also, if some of the cells don't work, then run them a couple of times for the interactive elements to load properly. If they still don't work, let me know and I'll send pdf versions.

```
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as ss
import scipy.stats as sct
import copy
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
from matplotlib import animation, rc, colors
from IPython.display import HTML, display
```

With that out of the way, let's detail some of the background information of the scenario.

1. Many people in an area (1D, 2D, or 3D) want to connect to the internet.
2. Mobile drones fly in the air providing internet.
3. The people want to connect to the drone closest to them as it has the highest signal power.
4. The drones want to have the most people connected to them, because that feels good.

We also need some starting assumptions that I hope are not too crazy.

1. Agents (drones/people) cannot occupy the same space at the same time.
2. People are spread uniformly in the area.

___

Let's start with a game-theoretic analysis with a starting point in Hotelling's 1929 paper. Hotelling investigates how duopolies operate, specifically their location, in a one-dimensional world. He argues that if we assume an even spread of demand along an axis, and two competitors who want to maximize their profits, then they want to choose their location so as to maximize potential customers, and adjust prices accordingly. Customers in Hotelling's scenario care about the distance to the company, as that costs them utility (i.e. transport cost, energy, comfort), and about the price of the product. Simply, if price+distance of company A > price+distance of company B, then they'll go to company B.

In the drone scenario outlined above, location is the only relevant parameter. We can solve the game in various ways. The way the game plays out is almost the same whether it's played simultaneously or sequentially, and whether it's a one-shot game or a repeated game. But let's deal with each in turn for the 2-agent scenario anyway. But first, a general observation.

Across all conditions, the same is always true: the best response of agent A is always to be one unit to the left or right of agent B, depending on whether agent B is to the right or left of center, respectively. In other words, the best place to be is just a tiny bit closer to the center than the other agent.

Reason: if $location(AgentB) = 0.8$ on an axis $[0, 1]$, and $location(AgentA) = 0.5$, then Agent A has a utility of $0.5 - 0 + \dfrac{0.8 - 0.5}{2}$ (distance to edge + half of the area between the agents). Agent B on the other hand has $1 - 0.8 + \dfrac{0.8 - 0.5}{2}$. This gives $Utility(A) = 0.65$, and $Utility(B) = 0.35$, or $1 - Utility(A)$. Varying the position of Agent B will give different utilities, but never better than A's.

As we can see, each agent covers everything from the edge on its own side up to its position, plus half of the distance between the two agents; the agent closest to the center therefore always covers more than half. This can be seen in the quick check and the interactive plot below (put Position_Agent_A to 500).
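First, a quick non-interactive check of the arithmetic above (added for illustration), using the same 0.5 / 0.8 example on the unit interval:

```
# Coverage on [0, 1] with uniformly spread users: own edge up to own position,
# plus half of the gap between the two agents.
lA, lB = 0.5, 0.8
uA = (lA - 0.0) + (lB - lA) / 2   # Agent A: left edge to 0.5, plus half the gap
uB = (1.0 - lB) + (lB - lA) / 2   # Agent B: 0.8 to right edge, plus half the gap
print(uA, uB)                     # 0.65 0.35
```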
```
@interact_manual # Interactive plot
def game_plot(Position_Agent_A=(0,1000,1)):
    # Location of agent A, adjusted interactively!!
    lA = Position_Agent_A
    uB = [] # Utility of agent B
    for lB in range(0,1001): # Over each possible location of agent B
        if lB < lA:
            uB.append(lB + ((lA) - lB) / 2)
        if lB > lA:
            uB.append(1000 - lB + ((lB) - lA) / 2)
        if lB == lA:
            uB.append(500)
    plt.figure();
    uB = np.array(uB)
    plt.plot(uB/1000,'b',label="Agent B")
    plt.plot(1-(uB/1000),'r',label="Agent A")
    plt.title("Utility as a function of position")
    plt.legend()
    plt.xlabel("Position Agent B")
    plt.ylabel("Fraction of coverage")
```

As can be seen in the above interactive plot, if both agents occupy the same spot (relaxing assumption 1), then they split the area. If agent B goes to the left of Agent A when Agent A is right of center (>500), then Agent B has the most coverage, and vice versa.

____

So let's look at the different conditions.

**Simultaneous + one-shot:** Here both agents should choose the middle (coin toss for exact position since stacking is not allowed). Reason: no agent can know where the other will be, and so while the middle is not necessarily the best response given information about the location of the other agent, it's never a bad response, as it will always give you at least as much coverage as your opponent. Thus the middle for both agents is the Nash equilibrium, but not the social welfare maximum (25% / 75% locations) in the Hotelling scenario. The middle is also Pareto optimal/efficient, as any agent will be worse off by relocating (to the benefit of the other), but then, any position is Pareto optimal since the game is zero-sum in a sense.

**Simultaneous + repeated:** The solution here is the same as the above, as there'll be no movement of agents due to Pareto optimality and the Nash equilibrium being in the center, unless of course one agent is irrational and chooses another location and stays there, at which point the other agent should switch, though that's beyond this discussion.

**Sequential + one-shot:** The solution here is also simple. Wherever Agent A positions itself, agent B should go one unit to the left or right depending on whether Agent A is right or left of center, respectively. Agent A, on the other hand, should, as we learned from the two conditions above, position itself in the center. If there is an exact center, then Agent A wins, as all other spots are worse for Agent B. If there is no exact center where it's possible to stand, e.g. the integer line between 1 and 6 (middle [3,4]), then they split. This is the first-mover advantage.

**Sequential + repeated:** Obviously, we get the same solution here as above.

There's one additional comment to make on the above conditions. Given how the calculations are made, it's easy to see that the distance both agents have to the center needs to be equal for them to get the same utility. So, if one could plan ahead and communicate, then one could in the simultaneous games at least maximize social welfare, but given self-interested agents, we will always converge on the middle as agents defect from cooperation.

This explains why coffee shops open up next to each other along a street, or how political two-party systems have parties that are just left and right of center (though the real world is irrational and sometimes they push each other further and further away from the real center, as can be seen in the US currently).

However, adding more agents into the mix makes the whole scenario more interesting. The Nash is no longer in the center!
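To back that last claim up with numbers, here is a small added check (a sketch; ties are broken by list order, which is enough for the argument) showing that with three drones, clustering at the center is no longer an equilibrium — the middle drone gains a lot by stepping just outside the cluster:

```
def coverage(positions, size=1000):
    # Count, for every integer position on [0, size], which drone is nearest.
    counts = [0] * len(positions)
    for user in range(size + 1):
        dists = [abs(user - p) for p in positions]
        counts[dists.index(min(dists))] += 1
    return counts

print(coverage([499, 500, 501]))  # the drone at 500 is squeezed to almost nothing
print(coverage([499, 497, 501]))  # ...so moving it from 500 to 497 is a profitable deviation
```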
In a simultaneous game, the best response (Nash) is a mixed stragety of any point in the middle two quartiles (Shaked, 1982). I discuss this a bit more at the bottom of this text as it's not part of the excercise (and I might be wrong). The social welfare optima in a 2 drone game is achieved by splitting the game area in two, and have a drone in the center of each (positions 250/750). We can do the following to show this. Analytically, the average distace to a point on a line can be summed up as: $$Avg.dist. = AD = \dfrac{\dfrac{a^2}{2}+\dfrac{b^2}{2}}{a+b}$$ where $a$ is the distance from agent A's position to the left edge, and $b$ the distance to the right edge. Since $a+b$ is always the same, and knowing how exponentials work, then $argmin(AD)$ is achieved by $a = b$, proving that splitting the axis is optimal. This means that the $AD$ for a drone in the center of a 1000 long axis = 250. But since we have $n > 1$ drones, then we get $$Global.AD = GAD = \dfrac{1}{n}\sum_{i=1}^{n}(\dfrac{\dfrac{a_i^2}{2}+\dfrac{b_i^2}{2}}{a_i+b_i})$$ where $a_i$ and $b_i$ is the distances to the left and right edges of drone $n$'s segment. As we've shown that the best way to split a line is in the middle, to minimize $AD$ then similarily, we should split the line into equally long segments so to minimize the contribution of each segment of the line to $GAD$. In fact, with $n$ drones, then the $GAD_{NASH} = n * GAD_{WELFARE}$. Below, I show this in some other ways. ``` # Finding the min through search mini = 100000 maxi = 0 loc = [0,0]; locm = [0,0] for a in range(1,101): # Cheating a bit using 100 instead of 1000. for b in range(1,101): if not a == b: t = np.mean([np.min([np.abs(i-a),np.abs(i-b)]) for i in range(1,101)]) loc = [a,b] if t < mini else loc locm = [a,b] if t > maxi else locm mini = t if t < mini else mini maxi = t if t > maxi else maxi print("Brute force Search for social welfare maximum = {:.2f} with one drone at location {} and the other at {}\n".format(mini*10,loc[0]*10,loc[1]*10)) print("and the Nash equilibria has an average distance of {:.2f} with one drone at {} an the other at {}, as proven earlier\n".format(mini*2*10,500,500)) print("The 'evil dictator optimum' however is {:.2f} at {} and {}, although one drone is not very happy.\n".format(maxi*10,locm[0],locm[1])) print("Theoretically, drones at 0 and 0 would create a absolute maximum of 500 but the simulation above is not completely accurate.") print("********************\n") plt.figure() y = [np.divide(np.mean([0,x])*x+np.mean([0,1000-x])*(1000-x),1000) for x in range(1001)] plt.plot(y,'b',label="Point on line") plt.title("Average distance to a point on the line") plt.ylabel("Average distance") plt.ylim([0,500]) plt.legend() print("Now, a little plot over dividing lines, and argmin over the mean distance, would result in the center.\n") plt.show() print("*******************\n") # Interactively print("And finally, an interactive plot showing the average distance for mobile users for different drone locations.\n") @interact_manual # Interactive plot def game_plot(Position_Agent_A=(0,1000,1),Position_Agent_B=(0,1000,1)): # Location of agent A, adjusted interactively!! 
    lA = Position_Agent_A
    lB = Position_Agent_B
    plt.figure()
    plt.plot([lB,lB],[0,1],'b--',label="Agent B")
    plt.plot([lA,lA],[0,1],'r--',label="Agent A")
    plt.legend()
    plt.xlabel("Position")
    plt.ylabel("Distance to nearest drone")
    plt.ylim([0,500])
    for i in range(1001):
        plt.bar([i],np.min([np.abs(i-lA),np.abs(i-lB)]))
    plt.title("Average distance to drones: {}".format(np.mean([np.min([np.abs(i-lA),np.abs(i-lB)]) for i in range(1,1001)])))
```

___

The above discussion should answer questions 1 and 2 of the exercise. But in case it is not clear, here's a summary: Hotelling's study of duopolies shows that given two self-interested agents who want to have the most customers/coverage of a limited 1D area, the Nash equilibrium is both agents positioning themselves in the middle of the axis, as that splits the customer base. No agent can improve from this. For the drone scenario, the situation is the same, as the two drones compete for mobile phone connections, which are spread out evenly along a 1D axis. For both customers and mobile phone users, the social optimum would be for the average distance to the shop/drone to be as small as possible. Since the average distance from all points on a line to a single target is smallest when the target is in the middle, it stands to reason that dividing the 1D line into $n$ segments and placing the agents in the middle of those segments creates the social optimum. For more than two agents, however, the Nash equilibrium is a mixed strategy over the middle two quartiles with equal probability.

___

Now we're moving to the "real world", that is, a 2D grid. Here I'll simulate two or more agents and how they move around the grid in a simultaneous repeated game of "wifi drones"! Each agent is independent of the other(s) in the sense that it will sample the space in a random direction, and if that location is better, it will move there. The reason for random sampling is that, in real life, oversampling costs time and mobile users on the ground won't necessarily connect to the closest drone immediately. So the drones move a random distance in a random direction, sample, and if that gives better coverage, they stay there. If not, they go back to the previous location. This way, drones only need to know the boundaries of the area and how many users they have (users and coverage are interchangeable here).

```
print("An interactive simulation. Note that with drones=10, tau=5000, size=100, it might take up to 30 minutes.\n")
print("However, I recommend running a few with time>2000, just to get better averages\n")
print("in the bottom heatmap, and estimation of mean coverage and mean number of users.\n")
print("Start is the kind of drone position start you want, but note that only 'Random' works for drones not equal to 4\n")
print("If not-random start, have drones at 4, movefac at gauss, and speed low for best results.")
print("Finally, avgdistfac is a tool for weighting the influence of avg dist to users (exponentially) relative to global optima. 
0 = no effect.\n") @interact_manual def wifi_drone_game(tau=(10,5000,1), drones=(2,10,1), speed=(1,7,1), size=(10,100,1), start=["Random","Social_Optima","Center_Nash"],movefac=["uniform","gauss"],avgdistfac=(0,5,1)): # Inputs: # Tau = Time units # Drones = number of Drones # Speed = Max distance a drone can move # Size = Size of area (it's a square) m = [] # Social optima for squares for x in range(size): for y in range(size): m.append(np.sqrt((x-size/2)**2 + (y-size/2)**2)) m = np.mean(m) area = [size,size] # Size of area in each dimension start_locations = {"Random":[[np.random.randint(0,size+1),np.random.randint(0,size+1)] for i in range(drones)], # Random starting locations "Social_Optima":[[size/4,size/4],[3*size/4,3*size/4],[size/4,3*size/4],[3*size/4,size/4]], # 4 drone Social optima "Center_Nash":[[size/2 + np.random.random()/10,size/2 + np.random.random()/10], [size/2 + np.random.random()/10,size/2 + np.random.random()/10], [size/2 + np.random.random()/10,size/2 + np.random.random()/10], [size/2 + np.random.random()/10,size/2 + np.random.random()/10]], # Center Nash } class drone: # A drone def __init__(self, location): self.location = [location,[0,0]] # The drone's current and temp location in the 2D grid self.users = [0,0] # Number of current and temp users connected to the drone self.mem = [0,0] # A memory of how to get back self.avgdist = 0 def move(self,direction): # A function that moves the drone unless it would cross over boundary self.location[1] = np.add(self.location[0], direction) # Moving self.location[1] = np.add(self.location[0],self.mem) if np.sum(np.abs(self.mem)) > 0 else self.location[1] # Go back for i in range(2): # Checking boundary conditions if self.location[1][i] > size: self.location[1][i] = size elif self.location[1][i] < 0: self.location[1][i] = 0 self.mem = [-1*x for x in direction] # Reverse coordinates def update(self,adjf): # Updates position and number of users for new location self.users[1] = self.users[1]*(adjf/self.avgdist)**avgdistfac if self.users[0] < self.users[1]: # If new place is better, reset memory (agent doesn't go back) self.mem = [0,0] self.location[0] = self.location[1] self.users[0] = self.users[1] self.users[1] = 0 def best_drone(size,dronelist): # A function that updates the number of users each drone has a = np.zeros([size,size]) b = [] for x in range(size): for y in range(size): dist = [np.sqrt((x-d.location[1][0])**2 + (y-d.location[1][1])**2) for d in dronelist] a[y,x] = dist.index(min(dist)) dronelist[dist.index(min(dist))].users[1] += 1 dronelist[dist.index(min(dist))].avgdist += min(dist) b.append(min(dist)) b=np.mean(b) return a,b # Returns a map of each drone's coverage, and mean distance to drones dronelist = [drone(start_locations[start][i]) for i in range(drones)] # Creating drones # Keeping track of stuff score_hist = [] # Number of users position_hist = [] # History of each drone's location mat = [] # Coverage history mean_dist = [] # Average distance to connections # Some plotting stuff before we begin movie time! 
fig,ax = plt.subplots() colrs = ["r","b","k","g","y","w","brown","orange","pink","grey"] ax.axis([0,size,0,size]) im = ax.imshow(np.zeros([size,size]),vmin=0,vmax=drones) dot = ax.scatter([[0]*drones], [[0]*drones], c=colrs[0:drones], s=100, edgecolor="k") patches = [im] + [dot] def init(): im.set_data(np.zeros([size,size])) dot.set_offsets([]) return patches def animate(i): a = [x[0] for x in position_hist[i]] b = [x[1] for x in position_hist[i]] im.set_data(mat[i]) dot.set_offsets(np.c_[a,b]) return patches # Now we're ready to simulate for t in range(tau): # Each drone moves in a random direction if movefac == "uniform": [d.move([np.random.randint(-speed,speed+1),np.random.randint(-speed,speed+1)]) for d in dronelist] # Uniform movement speed distribution else: [d.move([np.random.normal(0,speed/2),np.random.normal(0,speed/2)]) for d in dronelist] # Uniform movement speed distribution # Updating scores a,b = best_drone(size,dronelist) mat.append(copy.deepcopy(a)) mean_dist.append(copy.deepcopy(b)) # Moving drone to new location if better [d.update(m/drones) for d in dronelist] # Storing results score_hist.append([d.users[0] for d in dronelist]) position_hist.append([d.location[0] for d in dronelist]) anim = animation.FuncAnimation(fig, animate, init_func=init, frames=tau, interval=100, blit=True) plt.close("all") display(HTML(anim.to_html5_video())) # Average payoffs, avg. distance, nash, etc print("*******************************\n") print("The average payoffs for the drones are {}\n".format(np.mean(score_hist,axis=0))) print("Mean distance to drones = {:.2f}, compared to Center = {:.2f}, and social optima = {:.2f} assuming area can be divided in n equal sized squares.\n".format(np.mean(mean_dist),m,m/drones)) print("********************\n") # Location map matr = np.zeros([size,size]) for x in position_hist: for y in x: matr[int(y[1])-1,int(y[0])-1] += 1 fig,ax = plt.subplots() ax.imshow(matr,cmap="hot",interpolation="nearest", origin="lower") ax.add_artist(plt.Circle((size/2,size/2),(size/2)/1.5,color="w",fill=False)) ax.add_artist(plt.Circle((size/2,size/2),(size/2)/2,color="w",fill=False)) ax.add_artist(plt.Circle((size/2,size/2),(size/2)/20,color="w",fill=False)) plt.title("Heatmap over location history") print("White circles represent estimated social optima for 3-4 drones and center\n") ``` ___ To summarize the above, and answer question 3 of the excercise, we can observe the following. While we know from theoretical analysis that there is a Nash equilibria in the middle for a 1D line for 2 drones, that also extends to higher dimensions, which can be observed in the animation (though randomness makes it unstable). In the 1D game, we saw that n>2 drones results in Nash being a mixed strategy. This is also the case in 2D. If all drones are packed in the middle in a tight ring, any movement away from the center is a net gain for a given drone (if the others stay still). Then the best bet for everyone is to follow suit (if sequential), as shown below. ``` def best_drone(size,dronelist): # A function that updates the number of users each drone has a = np.zeros([size,size]) b = [] for x in range(size): for y in range(size): dist = [np.sqrt((x-d[0])**2 + (y-d[1])**2) for d in dronelist] a[y,x] = dist.index(min(dist)) b.append(min(dist)) b=np.mean(b) return a,b # Returns a map of each drone's coverage, and mean distance to drones print("Example of moving in grid (note: not exact counts due to grid vs. 
float positions and edges)") plt.figure() plt.subplot(1,3,1) size = 50 # All drones pretty much in center dronelist = [[24.9,25.1],[25.1,24.9],[25.1,25.1],[24.9,24.9]] a1,b1 = best_drone(size,dronelist) plt.imshow(a1, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") #Moving one drone a bit off plt.subplot(1,3,2) dronelist = [[24.9,25.1],[25.1,24.9],[29.1,25.1],[24.9,24.9]] a2,b2 = best_drone(size,dronelist) plt.imshow(a2, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") # Moving another drone a bit off plt.subplot(1,3,3) dronelist = [[24.9,25.1],[25.1,22.9],[29.1,25.1],[24.9,24.9]] a3,b3 = best_drone(size,dronelist) plt.imshow(a3, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") plt.show() print("Left plot shows all drones equally placed around center. This is precarious ({})!\n".format(np.unique(a1,return_counts=True)[1])) print("because middle plot shows one drone moving off and gaining a lot (diff: {})!\n".format(np.unique(a2,return_counts=True)[1]-np.unique(a1,return_counts=True)[1])) print("Which prompts the next drone to move off as well, shown in the right plot.(diff: {})!\n".format(np.unique(a3,return_counts=True)[1]-np.unique(a1,return_counts=True)[1])) plt.figure() plt.subplot(1,3,1) # All drones at social optima dronelist = [[12.5,25.],[25.,12.5],[37.5,25.],[25.,37.5]] a1,b1 = best_drone(size,dronelist) plt.imshow(a1, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") #Moving one drone a bit off plt.subplot(1,3,2) dronelist = [[10.5,25.],[25.,12.5],[37.5,25.],[25.,37.5]] a2,b2 = best_drone(size,dronelist) plt.imshow(a2, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") # Moving another drone a bit off plt.subplot(1,3,3) dronelist = [[14.5,25.],[25.,12.5],[37.5,25.],[25.,37.5]] a3,b3 = best_drone(size,dronelist) plt.imshow(a3, origin="lower") plt.scatter([d[0] for d in dronelist],[d[1] for d in dronelist], s=100, edgecolor="k") plt.show() print("Left plot shows all drones equally placed near social optima (center of triangle). This is precarious ({})!\n".format(np.unique(a1,return_counts=True)[1])) print("because middle plot shows one drone moving off and losing a lot (diff: {})!\n".format(np.unique(a2,return_counts=True)[1]-np.unique(a1,return_counts=True)[1])) print("no matter which way it goes, shown in the right plot.(diff: {})!\n".format(np.unique(a3,return_counts=True)[1]-np.unique(a1,return_counts=True)[1])) #unique(a, return_counts=True) ``` In a simultaneous game however, through backwards induction, every agent should move one unit further out than the others, but not further than length axis / 2, because at that point, every direction gives worse coverage, as seen in plots above (even with some uneveness in grid). This means that for n > 2 drones, small step iterated game, then Nash = a ring shaped structure. In the animation above however, we don't always get that Nash equilibrium emerging. Instead, one can see that drones often clump together that then "battle" it out for position, as is expected from a game theoretical point (the best place to be is on just the edge of another agent). We can also observe that over time, the drones tend to have quite similar average user count. Both these observations indicate that the agents are somewhat "rational". 
We can also observe that the agents hover around the middle in a ring shape (if n > 3), although not exactly in the center. The heatmap of the location history confirms this observation. Also evident is that as long as there's rough equality in coverage, the drones tend to keep formation, while as soon as inequality arises, one can observe more rapid dynamics, as new positions turn out to be better more often than not. As such, there emerges a semi-stable dynamic of a ring, with equal distance between agents, that approaches the social optimum with 3-4 drones (which is also the Nash in an iterative game with small movements). For five drones, one should be in the center. For more drones, the optimal structure changes according to the number of drones. Theoretically, the optimum can be calculated as the average distance if all drones were in the center, divided by the number of drones, as in the 1D case. For the 4-drone case this is close to the small-step iterated Nash.

There are 3 main reasons why we don't see a stable center Nash (2 drones), a social-optimum Nash (4 drones), or an extension of the mixed-strategy Nash from the 1D condition with multiple drones.

1. If a random sample didn't turn out to be better, the drone returns to its previous position, which might not be a good spot anymore, since the other agents might have moved as well. Since the game is simultaneous, small random changes can cause big divergences in optimal behavior, such as location B being better than A, but only before everyone else has decided on their positions, at which point it might be worse.
2. An agent can be "blocked in" by another agent, such that the only way to improve is to cross over the edge of another agent's sphere of influence, which might require a higher speed than its random walk allows.
3. With multiple agents, there's a risk inherent in moving. So while the agents will tend to form a semi-stable ring constellation, the ring might move around the area, and as such it's hard for the agents towards the edge to break free. In other words, given the stochastic movement, stability is hard to find.

There are some solutions that could be implemented to "improve" behavior.

1. Memory of the number of users, x rounds back, so that a drone moves in the direction of where it usually gets good numbers if it doesn't improve in the direction it's going. Adding this option would add assumptions to the game, however, and as such it is not implemented here.
2. Have a non-uniform distribution over "speed" so that the drone occasionally makes bigger jumps to avoid local minima. This has been implemented, and the curious reader can select the "gauss" option in the parameters.
3. Communication between agents, such as knowing how many users they have, or the distance between them. Not implemented here.
4. Sequential checking, i.e. a drone moves, samples, and then decides whether it's staying or not, before any other drone does anything. This would increase computational complexity a lot due to the calculation of users, but would result in better decision making and more stable structures.

So in summary, to answer the questions in point 3 of the exercise directly:

1. The simulation does not end up in a Nash equilibrium with more than 2 agents, due to randomness, but the overall positioning is an approximation of it for 4 drones. For 2 agents, the center will on average be a hotspot.
2. The simulation tends towards somewhere between a social welfare optimum and the "worst" outcome, which is all drones in the center, though due to randomness it seldom achieves it.
The social optimum is to divide the area into n equal areas (same shape and size) and place a drone in the geometric center of each, just like with a 1D space. But this division is not stable. If you divide the area into 9, for example, then the drones along the edges are incentivized to push towards the middle.

___

Now, let's move on to game matrices. A game matrix is a utility matrix, or payoff matrix, over the different choices an agent can make versus the responses of other agents. While decision theory is more concerned with the choices of a single agent given a scenario, a game matrix explicitly models the choices of the other agents as well. For example, in the drone scenario above, we could say that an agent's best response is always to move in the direction of more coverage if such a direction exists. For the 1D line, this will end with all agents battling it out in the center. Or, in a simultaneous-choice variant, one could simulate agents who update their probabilities of choosing a particular spot according to the average payoff. I've done this below. One can see that for 2 agents, with some winbonus (a reward for being best, indicating no real need to improve one's position), the probabilities converge on the middle over time. With 3 agents, however, it reflects a flatter distribution over the middle two quartiles, as expected from theory. More agents result in a bigger and flatter spread.

```
print("Drones is number of drones, rounds is number of iterations (300 recommended), samples is how much to sample each probability dist\n")
print("with 2xsize recommended, size is length of 1D line (10-20*drones recommended), and winbonus is how important it is to win, i.e. an indication\n")
print("that there's little reason to improve location.")

@interact_manual
def wifi_dronesB(drones=(2,6,1),rounds=(1,300,1),size=(10,102,1), samples=(10,500,1),winbonus=(1,10,1)):
    gmat = np.zeros((drones,size))+1/size # Game matrix
    resmat = [] # Results

    def game(pos,size): # Game function
        score = [0]*len(pos)
        for i in range(size): # Checking which drone is nearest
            x = [np.abs(i-y+(np.random.random()/100)) for y in pos] # Adding a tiny random number to resolve stacking
            score[x.index(min(x))] += 1
        for i in np.where(np.array(score) == max(score))[0]: # cast to array so the comparison is element-wise
            score[i] *= winbonus
        return score

    def normalize(mat): # Normalizes a 2D matrix
        for i in range(len(mat)):
            mat[i] /= np.sum(mat[i])
        return mat

    # Some plotting stuff before we begin movie time!
    fig,ax = plt.subplots()
    barcol = ax.bar(np.linspace(0,size-1,size),np.mean(gmat,axis=0))
    ax.set_ylim([0,1])

    def init():
        for i in range(len(barcol)):
            barcol[i].set_height(np.mean(gmat,axis=0)[i])
        return barcol

    def animate(i):
        for idx in range(len(barcol)):
            barcol[idx].set_height(resmat[i][idx])
        return barcol

    # Now we're ready to simulate
    for t in range(rounds):
        scores = np.zeros((drones,size))
        counts = np.zeros((drones,size))+0.001
        # Then, each drone picks a location; we'll do this a number of times
        for it in range(samples):
            positions = [np.random.choice(size, p=gmat[i]) for i in range(drones)]
            # Calculate score
            temp = game(positions,size)
            # Updating probabilities
            for i in range(drones):
                scores[i,positions[i]] += temp[i]
                counts[i,positions[i]] += 1
        # And updating our game matrix
        temp = np.divide(scores,counts)
        gmat = normalize(np.add(gmat,normalize(temp)))
        # And store the results
        resmat.append(np.mean(gmat,axis=0))

    print("Probability distribution of picking a specific location, averaged over drones")
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=rounds, interval=100, blit=True)
    plt.close("all")
    display(HTML(anim.to_html5_video()))
```

___

Let's look at game matrices. The following is a matrix for 2 agents playing wifi drones on a 1D line of length 5. Each cell indicates the expected payoff, and equal placement is resolved with a coin toss. For example, the center position gives 2 units to the left, 2 to the right, and 1 where you stand.

| | 1 | 2 | 3 | 4 | 5 |
|---|------------|------------|------------|------------|------------|
| **1** | (2.5, 2.5) | (1.0, 4.0) | (1.5, 3.5) | (2.0, 3.0) | (2.5, 2.5) |
| **2** | (4.0, 1.0) | (2.5, 2.5) | (2.0, 3.0) | (2.5, 2.5) | (3.0, 2.0) |
| **3** | (3.5, 1.5) | (3.0, 2.0) | (2.5, 2.5) | (3.0, 2.0) | (3.5, 1.5) |
| **4** | (3.0, 2.0) | (2.5, 2.5) | (2.0, 3.0) | (2.5, 2.5) | (4.0, 1.0) |
| **5** | (2.5, 2.5) | (2.0, 3.0) | (1.5, 3.5) | (1.0, 4.0) | (2.5, 2.5) |

Since this is a symmetric game, we can analyze it from the perspective of the top agent (i.e. (2.5, **2.5**)). We can see that choosing position 1 (column 1) is strictly dominated by position 2. That eliminates columns and rows 1 and 5 (symmetric game). This gives the revised table:

| | 2 | 3 | 4 |
|---|------------|------------|------------|
| **2** | (2.5, 2.5) | (2.0, 3.0) | (2.5, 2.5) |
| **3** | (3.0, 2.0) | (2.5, 2.5) | (3.0, 2.0) |
| **4** | (2.5, 2.5) | (2.0, 3.0) | (2.5, 2.5) |

Again, position 2 is strictly dominated by position 3, which leaves us with a best response in the middle. However, while the above analysis is general for any length of line, we need $5^n$ cells to represent an $n$-drone game on this line, so the representation grows exponentially with the number of drones. But for n = 3 we can try the following shortcut:

| | 1 | 2 | 3 | 4 | 5 |
|---|------------|------------|------------|------------|------------|
| **1** | $3.\overline{3}$/2 | 4/2 | 3.5/2 | 3/2 | 2.5/2 |
| **2** | 1/2 | $3.\overline{3}$/2 | 3/2 | 2.5/2 | 2/2 |
| **3** | 1.5/2 | 2/2 | $3.\overline{3}$/2 | 2/2 | 1.5/2 |
| **4** | 2/2 | 2.5/2 | 3/2 | $3.\overline{3}$/2 | 1/2 |
| **5** | 2.5/2 | 3/2 | 3.5/2 | 4/2 | $3.\overline{3}$/2 |

Here, in this slightly ugly table, we've added the payoffs for agent 3, depending on where agents 1 and 2 are positioned, using the columns. For example, if all agents go to the center, then there is a 2/3 probability that the agent will be in the extreme position (coin toss), with a reward of 2.5 in each position. The other values are calculated similarly. However, this table is only valid for the case when agents 2 and 3 choose the same spot.
The inverse of this table would be for when agents 1 and 2 choose the same spot. But it's already clear that, while position 2 strictly dominates position 1, position 3 no longer strictly dominates position 2! In other words, as expected, the best response is no longer in the middle, and so considering this table alone would warrant a mixed strategy. At any rate, it becomes a combinatorial nightmare quite quickly. In contrast, the earlier simulation grows with the size of the area (the length of the axis) and the number of time units, i.e. as $\tau \cdot size^2$. However, the game matrix approach does represent a complete description, while the simulation is only an approximation, limited by stochastic movement in an iterated fashion.

___

Finally, I'll discuss expanding the 2D wifi drone model to include some knowledge about other agents or the world, so as to approach the social optimum even more closely or exactly. Given that the social optimum is to divide the area into equally sized polygons, i.e. a tessellation using triangles, squares, or hexagons, one should find a rule that could aid in this. In the above simulation I've used squares in the calculations, though hexagons would probably reduce the average distance even further with enough drones. But it would depend on the number of drones one has. Options:

1. Implemented. Take into account the average distance to the drone, globally and locally. The most likely effect of this, on the local level, is to reduce the amount of "battling" or clustering between drones. Depending on the weight of the utility given to the average distance, it would provide a counter against moving towards others, which is normally a good choice. This is added to the main simulation with the factor avgdistfac (adf): $utility = users \cdot \left(\dfrac{GAD}{AD}\right)^{adf}$. Some simulations show that for 4 drones this reduces the average local distance towards the social optimum, but with too high an adf it increases again. Qualitatively, agents tend to center more within their coverage areas. I assume here that maximum served bandwidth = minimum average distance to drones.
2. Not implemented. Aim for equal distance between the two closest drones, inspired by how bird flocks operate. This would ensure an even spread of drones, depending on the weight of this utility.
3. Not implemented. Movement patterns, such as the spirals used in search and rescue, adjusted by the number of users, the average distance, and the nearest-neighbour distance. This would ensure a more targeted search over the area. Not strictly an objective value.
4. Not implemented. Scale the movement speed according to the objective value (with or without implementing the other options) relative to the maximum estimated objective value. This would ensure that the smaller the share of the pie a drone has, the more erratic its behavior. It could be useful to escape cornering and bad local minima. Strictly not an objective value.
5. Not implemented. Battery could be useful, as sampling involves movement, which requires more energy. Given that recharging would reduce utility to 0 for a while (hypothetically), battery could be factored into the objective value such that more battery = more exploration (if need be), and less battery = a more wait-and-see approach. This would potentially make the game more stable over time.

___

In summary, for question 4 of the exercise, game matrices represent a complete description of the game being played, but the matrix grows as $size^n$, where $size$ is the length of the line and $n$ is the number of drones, so it quickly becomes intractable.
The decision-theoretic approach is much simpler, but it is affected by uncertainty (randomness, imperfect rules, partial information), although it scales polynomially. It might not find the Nash equilibrium or the social optimum, but it can approach them, depending on the rules and other parameters. Finally, I discussed some options for improving or extending the objective value (utility) of the drones, using for example information about the average distance to users, the distance to other drones, and so on.
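As a closing illustration of the game-matrix discussion, here is a small sketch (the function names are my own) that builds the two-agent payoff matrix for the 5-position line programmatically, using the same coin-toss tie rule as the tables above, and then applies iterated elimination of strictly dominated strategies:

```
import numpy as np

def payoff(p1, p2, n=5):
    # Expected coverage of player 1 on positions 1..n; ties are split by coin toss.
    pts = np.arange(1, n + 1)
    d1, d2 = np.abs(pts - p1), np.abs(pts - p2)
    return np.sum(d1 < d2) + 0.5 * np.sum(d1 == d2)

positions = list(range(1, 6))
M = np.array([[payoff(r, c) for c in positions] for r in positions])  # row player's payoffs
print(M)  # matches the first entries of the 5x5 table above

def eliminate_dominated(M, labels):
    # Iteratively remove strictly dominated strategies (rows and, by symmetry, columns).
    while True:
        dominated = [i for i in range(len(labels))
                     if any(np.all(M[j] > M[i]) for j in range(len(labels)) if j != i)]
        if not dominated:
            return labels
        keep = [i for i in range(len(labels)) if i not in dominated]
        M, labels = M[np.ix_(keep, keep)], [labels[i] for i in keep]

print(eliminate_dominated(M, positions))  # -> [3], i.e. only the center survives
```

The same construction for three or more agents would need a payoff tensor with $5^n$ entries, which is exactly the blow-up discussed above.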
# Full pipeline (detailed)

This notebook explains the full pipeline in a detailed manner, including the preprocessing steps, the summarization steps and the classification ones.

## Loading the dataset in the Pandas DataFrame format

Because Melusine operates on Pandas DataFrames, applying functions to certain columns to produce new columns, the initial columns have to follow a strict naming convention. The basic requirement to use Melusine is to have an input e-mail DataFrame with the following columns :

- body : Body of an email (single message or conversation historic)
- header : Header of an email
- date : Reception date of an email
- from : Email address of the sender
- to (optional): Email address of the recipient
- label (optional): Label of the email for a classification task (examples: Business, Spam, Finance or Family)

Each row corresponds to a unique email.

```
from melusine.data.data_loader import load_email_data
df_emails = load_email_data()
df_emails.columns

print('Body :')
print(df_emails.body[5])
print('\n')
print('Header :')
print(df_emails.header[5])

print('Date :')
print(df_emails.date[5])

print('From :')
print(df_emails.loc[5,"from"])

print('To :')
print(df_emails.to[5])

print('Label :')
print(df_emails.label[5])
```

## Pipeline to manage transfers and replies

A single email can contain several replies or transfers in its body.

In this pipeline the functions applied are :

- **check_mail_begin_by_transfer :** returns True if an email is a direct transfer, else False.
- **update_info_for_transfer_mail :** updates the columns body, header, date, from and to if the email is a direct transfer.
- **add_boolean_answer :** returns True if an email is an answer, else False.
- **add_boolean_transfer :** returns True if an email is transferred, else False.

This pipeline will create the following new columns :

- **is_begin_by_transfer (boolean) :** indicates if the email is a direct transfer, meaning the person who transferred a previous email has not written anything of their own. If that is the case, the body, header, date, from and to columns will be updated with the information of the transferred email.
- **is_answer (boolean) :** indicates if the body contains replies from previous emails.
- **is_transfer (boolean) :** indicates if the body contains transferred emails (not necessarily a direct transfer).

A rough illustration of the kind of header heuristics that can drive such flags is sketched below.
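This is only an illustration under my own assumptions (a regular expression on the header prefix), not Melusine's actual implementation; the helper names are hypothetical:

```
import re
import pandas as pd

# Hypothetical heuristics for illustration only (not the Melusine functions above):
# replies are often flagged by a "RE:" prefix, transfers by a "TR:"/"FW:"/"Fwd:" prefix.
def looks_like_answer(header):
    return bool(re.match(r"^\s*re\s*:", str(header), flags=re.IGNORECASE))

def looks_like_transfer(header):
    return bool(re.match(r"^\s*(tr|fw|fwd)\s*:", str(header), flags=re.IGNORECASE))

demo = pd.DataFrame({"header": ["RE: contract question", "Tr : payment details", "Information request"]})
demo["is_answer"] = demo["header"].apply(looks_like_answer)
demo["is_transfer"] = demo["header"].apply(looks_like_transfer)
print(demo)
```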
#### An example of a direct transfer

```
print(df_emails.loc[0,'header'])
print(df_emails.loc[0,'date'])
print(df_emails.loc[0,'from'])
print(df_emails.loc[0,'to'])
print(df_emails.loc[0,'body'])
```

#### The pipeline

```
from melusine.utils.transformer_scheduler import TransformerScheduler

from melusine.prepare_email.manage_transfer_reply import check_mail_begin_by_transfer
from melusine.prepare_email.manage_transfer_reply import update_info_for_transfer_mail
from melusine.prepare_email.manage_transfer_reply import add_boolean_transfer
from melusine.prepare_email.manage_transfer_reply import add_boolean_answer

ManageTransferReplyTransformer = TransformerScheduler(
    functions_scheduler=[
        (check_mail_begin_by_transfer, None, ['is_begin_by_transfer']),
        (update_info_for_transfer_mail, None, None),
        (add_boolean_answer, None, ['is_answer']),
        (add_boolean_transfer, None, ['is_transfer'])
    ]
)

df_emails = ManageTransferReplyTransformer.fit_transform(df_emails)
df_emails.columns
```

#### An email previously transferred directly, after it has been updated

```
print(df_emails.loc[0,'is_begin_by_transfer'])
print(df_emails.loc[0,'header'])
print(df_emails.loc[0,'date'])
print(df_emails.loc[0,'from'])
print(df_emails.loc[0,'to'])
print(df_emails.loc[0,'body'])
```

#### Headers of emails containing replies

```
test = df_emails[df_emails['is_answer']==True]
test.header
```

#### Headers of emails containing transfers

```
test = df_emails[df_emails['is_transfer']==True]
test.header
```

## Email segmenting pipeline

Each email will be segmented according to :

- the different messages
- the metadata, the header and the text of each message
- the type of metadata (date, from, to)
- the different parts of each text (hello, greetings, footer..)

In this pipeline the functions applied are :

- **build_historic :** segments the different messages of the body and returns a list of dictionaries, one per message. Each dictionary has a key 'meta' to access the metadata and a key 'text' to access the text of the body.
- **structure_email :** splits the parts of each message in the historic, tags them (tags: Hello, Body, Greetings, etc) and segments each part of the metadata (date, from, to). The result is returned as a list of dictionaries, one per message. Each dictionary has a key 'meta' to access the metadata (itself a dictionary with keys 'date', 'from' and 'to') and a key 'text' to access the text of the body (itself a dictionary with keys 'header' and 'structured_text').

This pipeline creates the following new columns :

- **structured_historic :** the list of dictionaries returned by the **build_historic** function.
- **structured_body :** the list of dictionaries returned by the **structure_email** function.

```
from melusine.prepare_email.build_historic import build_historic
from melusine.prepare_email.mail_segmenting import structure_email

SegmentingTransformer = TransformerScheduler(
    functions_scheduler=[
        (build_historic, None, ['structured_historic']),
        (structure_email, None, ['structured_body'])
    ]
)

df_emails = SegmentingTransformer.fit_transform(df_emails)
df_emails.columns

print(df_emails.body[2])
df_emails.structured_historic[2]
df_emails.structured_body[2]
```

## Extraction and cleaning of the body of the last message

Once each email is segmented, the body of its last message is extracted and cleaned.

In this pipeline the functions applied are :

- **extract_last_body :** returns the body of the last message of the email.
- **clean_body :** returns the body of the last message of the email after cleaning.
This pipeline returns the following columns :

- **last_body :** the body of the last message of the email, returned by the **extract_last_body** function.
- **clean_body :** the cleaned body of the last message of the email, returned by the **clean_body** function.

```
from melusine.prepare_email.body_header_extraction import extract_last_body
from melusine.prepare_email.cleaning import clean_body

LastBodyHeaderCleaningTransformer = TransformerScheduler(
    functions_scheduler=[
        (extract_last_body, None, ['last_body']),
        (clean_body, None, ['clean_body'])
    ]
)

df_emails = LastBodyHeaderCleaningTransformer.fit_transform(df_emails)
df_emails.columns

print(df_emails.body[2])
print(df_emails.last_body[2])
print(df_emails.clean_body[2])
```

## Applying a phraser

A phraser can be applied to the body. However, it first needs to be trained.

```
from melusine.nlp_tools.phraser import Phraser
from melusine.nlp_tools.phraser import phraser_on_body
```

#### Training a phraser

```
phraser = Phraser()
phraser.train(df_emails)
```

#### Applying a phraser

The **phraser_on_body** function applies a phraser on the clean_body of an email.

```
PhraserTransformer = TransformerScheduler(
    functions_scheduler=[
        (phraser_on_body, (phraser,), ['clean_body'])
    ]
)

df_emails = PhraserTransformer.fit_transform(df_emails)
```

## Applying a tokenizer

```
from melusine.nlp_tools.tokenizer import Tokenizer

tokenizer = Tokenizer(input_column="clean_body")
df_emails = tokenizer.fit_transform(df_emails)
df_emails.columns

print(df_emails.clean_body[2])
print(df_emails.tokens[2])
```

### Metadata preprocessing

The metadata have to be extracted before being dummified. This pipeline extracts the following metadata :

- **extension :** from the "from" column.
- **dayofweek :** from the date.
- **hour :** from the date.
- **min :** from the date.

```
from sklearn.pipeline import Pipeline
from melusine.prepare_email.metadata_engineering import MetaExtension
from melusine.prepare_email.metadata_engineering import MetaDate
from melusine.prepare_email.metadata_engineering import Dummifier

# Pipeline to extract dummified metadata
MetadataPipeline = Pipeline([
    ('MetaExtension', MetaExtension()),
    ('MetaDate', MetaDate()),
    ('Dummifier', Dummifier())
])

df_meta = MetadataPipeline.fit_transform(df_emails)
df_meta.columns
df_meta.head()
```

## Keywords extraction

Once a tokens column exists, keywords can be extracted.

```
from melusine.summarizer.keywords_generator import KeywordsGenerator

keywords_generator = KeywordsGenerator(n_max_keywords=4)
df_emails = keywords_generator.fit_transform(df_emails)

df_emails.clean_body[23]
df_emails.tokens[23]
df_emails.keywords[23]
```

## Classification with neural networks

Melusine offers a NeuralModel class to train, save, load and use for prediction any kind of neural network based on Keras. Predefined architectures of RNN and CNN models, using the cleaned body and the metadata of the emails, are also offered.

#### Embeddings training

Embeddings have to be pretrained on the dataset before being given as arguments to the neural networks.
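Conceptually this is ordinary word-embedding training on the tokenized bodies. As a rough illustration only (my own sketch with gensim >= 4, not Melusine's internals), it amounts to something like the following; the actual Melusine call is shown in the next cell.

```
# Illustration only: train word vectors directly with gensim on the tokens column.
from gensim.models import Word2Vec

sentences = df_emails["tokens"].tolist()   # lists of tokens produced by the tokenizer above
w2v = Word2Vec(sentences=sentences, vector_size=100, min_count=1, workers=1)
print(len(w2v.wv), "tokens in the vocabulary,", w2v.wv.vector_size, "dimensions each")
```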
```
from melusine.nlp_tools.embedding import Embedding

pretrained_embedding = Embedding(input_column='clean_body', workers=1, min_count=5)
pretrained_embedding.train(df_emails)
```

#### Preparing X and y

```
import pandas as pd
from sklearn.preprocessing import LabelEncoder

X = pd.concat([df_emails['clean_body'],df_meta],axis=1)
y = df_emails['label']

le = LabelEncoder()
y = le.fit_transform(y)

X.columns
X.head()
y
```

#### Training and predicting with a CNN

```
from melusine.models.neural_architectures import cnn_model
from melusine.models.train import NeuralModel

nn_model = NeuralModel(architecture_function=cnn_model,
                       pretrained_embedding=pretrained_embedding,
                       text_input_column="clean_body",
                       meta_input_list=['extension', 'dayofweek','hour', 'min'],
                       n_epochs=10)

nn_model.fit(X,y)

y_res = nn_model.predict(X)
y_res = le.inverse_transform(y_res)
y_res
```
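Note that the model above is evaluated on its own training data. As a minimal sketch (my addition, reusing the same NeuralModel setup as above), one could hold out a test set to get a fairer estimate of performance:

```
# Sketch: hold out a test set instead of predicting on the training data.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

nn_model = NeuralModel(architecture_function=cnn_model,
                       pretrained_embedding=pretrained_embedding,
                       text_input_column="clean_body",
                       meta_input_list=['extension', 'dayofweek', 'hour', 'min'],
                       n_epochs=10)
nn_model.fit(X_train, y_train)
print("test accuracy:", accuracy_score(y_test, nn_model.predict(X_test)))
```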
``` import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, confusion_matrix import matplotlib.pyplot as plt import seaborn as sns dane=pd.read_json('customersdata.txt', lines=True) dane = pd.concat([dane.drop(['customer'], axis=1), dane['customer'].apply(pd.Series)], axis=1) customers_df = dane.drop(columns=['orders', 'paymentMethods', 'transactions']) customers_df.head() orders_df = pd.DataFrame() paymentMethods_df = pd.DataFrame() transactions_df = pd.DataFrame() for single_customer in range(0, len(dane['orders']), 1): for order in dane['orders'][single_customer]: simple_df = pd.DataFrame([order]) simple_df['customer_id'] = single_customer orders_df = orders_df.append(simple_df) for single_customer in range(0, len(dane['paymentMethods']), 1): for paymentMethod in dane['paymentMethods'][single_customer]: simple_df = pd.DataFrame([paymentMethod]) simple_df['customer_id'] = single_customer paymentMethods_df = paymentMethods_df.append(simple_df) for single_customer in range(0, len(dane['transactions']), 1): for transaction in dane['transactions'][single_customer]: simple_df = pd.DataFrame([transaction]) simple_df['customer_id'] = single_customer transactions_df = transactions_df.append(simple_df) orders_df.head() paymentMethods_df.head() transactions_df.head() print('Total number of customers: {}'.format(len(customers_df))) print('Total number of orders: {}'.format(len(orders_df))) print('Total number of payment methods: {}'.format(len(paymentMethods_df))) print('Total number of transactions: {}'.format(len(transactions_df))) print('Number of frauds in the set: {}'.format(customers_df['fraudulent'].sum())) print('Number of non-fraud in the set: {}'.format(len(customers_df) - customers_df['fraudulent'].sum())) print('Percentage of frauds in file: {}'.format(100*(customers_df['fraudulent'].sum() / len(customers_df)))) customers_df['customer_id'] = range(0, len(customers_df['fraudulent']), 1) no_order_customers = set(customers_df['customer_id']) - set(orders_df['customer_id']) print('Customerws without any orders (total: {}):\n{}'.format(len(no_order_customers), sorted(list(no_order_customers)))) no_paymentMethod_customers = set(customers_df['customer_id']) - set(paymentMethods_df['customer_id']) print('Customers without any payment method (total: {}):\n{}'.format(len(no_paymentMethod_customers), sorted(list(no_paymentMethod_customers)))) no_transactions_customers = set(customers_df['customer_id']) - set(transactions_df['customer_id']) print('Customers without any transactions (total: {}):\n{}'.format(len(no_transactions_customers), sorted(list(no_transactions_customers)))) strange_clients = no_order_customers.intersection(no_paymentMethod_customers).intersection(no_transactions_customers) print('Customers without any order, payment methods and transcations (total: {}):\n{}'.format(len(strange_clients), sorted(list(strange_clients)))) customers_df[customers_df['customer_id'].isin(strange_clients)] customers_df[customers_df['customer_id'].isin(no_order_customers.intersection(no_transactions_customers))] clients_to_delete = set(no_order_customers.union(no_paymentMethod_customers).union(no_transactions_customers)) customers_df['strange_customerEmail'] = 0 customers_df['strange_paymentMethodIssuer'] = 0 customers_df['ipv4'] = 0 customers_df['ipv6'] = 0 customers_df['repeated_customerIPAddress'] = 0 customers_df['repeated_customerEmail'] = 0 
customers_df['repeated_customerBillingAddress'] = 0 customers_df['customerEmailDomainPublic'] = 0 customers_df.loc[customers_df['customerEmail'].str.contains('\.') == False, 'strange_customerEmail'] = 1 customers_df.loc[customers_df['customerIPAddress'].str.contains('\.') == True, 'ipv4'] = 1 customers_df.loc[customers_df['customerIPAddress'].str.contains('\:') == True, 'ipv6'] = 1 #what IP repeats? customers_df[customers_df.duplicated(['customerIPAddress'])].drop_duplicates()['customerIPAddress'] customers_df.loc[customers_df['customerIPAddress'].str.contains('45.203.99.249') == True, 'repeated_customerIPAddress'] = 1 #what mail repeats? customers_df[customers_df.duplicated(['customerEmail'])].drop_duplicates()['customerEmail'] customers_df.loc[customers_df['customerEmail'].str.contains('johnlowery@gmail.com') == True, 'repeated_customerEmail'] = 1 #what address repeated? customers_df[customers_df.duplicated(['customerBillingAddress'])].drop_duplicates()['customerBillingAddress'].tolist()[0] customers_df.loc[customers_df['customerBillingAddress'].str.contains('49680 Brian Squares Apt. 122\nPort Walterburgh, MH 02766-0708') == True, 'repeated_customerBillingAddress'] = 1 #checking if above works customers_df[customers_df.duplicated(['customerBillingAddress'])].drop_duplicates() #how many public and private domains? customers_df['customerEmail'].str.split('@',expand=True).reset_index().rename(columns={0:'customerEmailLogin', 1:'customerEmailDomain'}).groupby('customerEmailDomain').count().sort_values('customerEmailLogin', ascending=False).head(5) customers_df.loc[customers_df['customerEmail'].str.contains('gmail.com|yahoo.com|hotmail.com') == True, 'customerEmailDomainPublic'] = 1 orders_df['orderAmountCategory'] = pd.cut(orders_df['orderAmount'], bins=[0,33,66,100], labels=[1,2,3]).fillna(3).tolist() orders_by_amountCategory = orders_df.groupby(['customer_id', 'orderAmountCategory'])['orderId'].count().unstack().add_prefix('orderAmountCategory_').fillna(0).reset_index() orders_by_amountCategory.head() orders_by_state = orders_df.groupby(['customer_id', 'orderState'])['orderId'].count().unstack().fillna(0).add_prefix('orders_').reset_index() orders_by_state.head() orders_value_sum = orders_df.groupby(['customer_id'])['orderAmount'].sum().reset_index().rename(columns={'orderAmount': 'orders_value_sum'}) orders_value_sum.head() orders_all = orders_df.groupby(['customer_id'])['orderId'].count().reset_index().rename(columns={'orderId': 'orders_sum'}) orders_all.head() orders_repeatedAddress = orders_df.drop_duplicates('orderShippingAddress').groupby(['customer_id']).count().reset_index()[['customer_id', 'orderAmount']].rename(columns={'orderAmount': 'unique_orderShippingAddress'}) orders_repeatedAddress.head() #check if there are customers with outlining orders outlier_clients_to_delete = orders_df[orders_df['orderAmount'] > orders_df['orderAmount'].quantile(0.999)]['customer_id'].tolist() orders_df[orders_df['customer_id'].isin(outlier_clients_to_delete)] #strange payment institution strange_paymentMethodIssuer_customer_ids = paymentMethods_df[paymentMethods_df['paymentMethodIssuer'].str.len() == 1]['customer_id'].drop_duplicates().tolist() customers_df.loc[customers_df['customer_id'].isin(strange_paymentMethodIssuer_customer_ids), 'strange_paymentMethodIssuer'] = 1 test_df = paymentMethods_df.drop_duplicates(['customer_id', 'paymentMethodIssuer']) paymentMethods_issuers = test_df.groupby('customer_id').paymentMethodIssuer.size().reset_index().rename(columns={'paymentMethodIssuer': 
'unique_paymentMethodIssuer'}) paymentMethods_issuers.head() test_df2 = paymentMethods_df.drop_duplicates(['customer_id', 'paymentMethodProvider']) paymentMethods_providers = test_df2.groupby('customer_id').paymentMethodProvider.size().reset_index().rename(columns={'paymentMethodProvider': 'unique_paymentMethodProvider'}) paymentMethods_providers.head() paymentMethods_registrations = paymentMethods_df.groupby(['customer_id', 'paymentMethodType'])['paymentMethodId'].count().unstack().fillna(0).add_prefix('paymentMethod_').reset_index() paymentMethods_registrations.head() paymentMethods_types = paymentMethods_df.groupby(['customer_id', 'paymentMethodRegistrationFailure'])['paymentMethodId'].count().unstack().fillna(0).add_prefix('paymentMethod_').reset_index() paymentMethods_types.head() paymentMethods_all = paymentMethods_df.groupby(['customer_id'])['paymentMethodId'].count().reset_index().rename(columns={'paymentMethodId': 'paymentMethod_all'}) paymentMethods_all.head() transactions_df['transactionAmountCategory'] = pd.cut(transactions_df['transactionAmount'], bins=[0,33,66,100], labels=[1,2,3]).fillna(3).tolist() transactions_by_transactionAmount = transactions_df.groupby(['customer_id', 'transactionAmountCategory'])['transactionId'].count().unstack().add_prefix('transactionAmountCategory_').fillna(0).reset_index() transactions_by_transactionAmount.head() transactions_by_state = transactions_df.groupby(['customer_id', 'transactionFailed'])['transactionId'].count().unstack().fillna(0).add_prefix('transactions_').reset_index() transactions_by_state.head() transactions_value_sum = transactions_df.groupby(['customer_id'])['transactionAmount'].sum().reset_index().rename(columns={'transactionAmount': 'transactions_value_sum'}) transactions_value_sum.head() transactions_all = transactions_df.groupby(['customer_id'])['transactionId'].count().reset_index().rename(columns={'transactionId': 'transaction_all'}) transactions_all.head() #checking client without outliner transaction outlier_transactions_clients_to_delete = transactions_df[transactions_df['transactionAmount'] > transactions_df['transactionAmount'].quantile(0.999)]['customer_id'].tolist() transactions_df[transactions_df['customer_id'].isin(outlier_transactions_clients_to_delete)] #Creating a comprehensive set with variables for the model features_df = customers_df[['customer_id', 'fraudulent', 'strange_customerEmail',\ 'strange_paymentMethodIssuer', 'ipv4', 'ipv6',\ 'repeated_customerIPAddress', 'repeated_customerEmail',\ 'repeated_customerBillingAddress', 'customerEmailDomainPublic']] features_df.head() feature_dfs = [orders_by_state, orders_value_sum, orders_all, paymentMethods_registrations,\ paymentMethods_types, paymentMethods_all, transactions_by_state,\ transactions_value_sum, transactions_all, orders_repeatedAddress,\ paymentMethods_issuers, paymentMethods_providers, orders_by_amountCategory,\ transactions_by_transactionAmount ] for single_feature in feature_dfs: features_df = pd.merge(features_df, single_feature, how='left', on='customer_id') #removing uncomfortable customers features_df_clean = features_df[~features_df['customer_id'].isin(set(list(clients_to_delete) + outlier_transactions_clients_to_delete + outlier_clients_to_delete))] features_df_clean.head() #checking if everything is well harmonized features_df_clean.isna().sum() sns.distplot(dane.fraudulent) sns.countplot(dane.fraudulent) ``` Description of model component variables Independent variable: fraudulent - whether the customer is a fraud or not Dependent 
variables (aggregated from various sources down to the level of a single customer):

customers collection:
- _strange_customerEmail_ - whether the e-mail address provided by the customer is malformed (contains no dot)
- _strange_paymentMethodIssuer_ - whether the payment method issuer looks suspicious (a single-letter name)
- _ipv4_ - whether the customer IP address is in IPv4 format
- _ipv6_ - whether the customer IP address is in IPv6 format
- _repeated_customerIPAddress_ - whether the IP address assigned to this customer is not unique (i.e. it was also assigned to other customers)
- _repeated_customerEmail_ - whether the e-mail assigned to this customer is not unique (i.e. it was also assigned to other customers)
- _repeated_customerBillingAddress_ - whether the billing address assigned to this customer is not unique (i.e. it was also assigned to other customers)
- _customerEmailDomainPublic_ - whether the e-mail comes from a well-known public domain (gmail.com, yahoo.com or hotmail.com)

orders set:
- _orders_failed_ - total number of orders with status failed
- _orders_fulfilled_ - total number of orders with status fulfilled
- _orders_pending_ - total number of orders with status pending
- _orders_value_sum_ - total value of orders
- _orders_sum_ - total number of orders
- _orderAmountCategory_1_ - total number of orders with an amount between 0 and 33
- _orderAmountCategory_2_ - total number of orders with an amount between 33 and 66
- _orderAmountCategory_3_ - total number of orders with an amount above 66
- _unique_orderShippingAddress_ - total number of unique shipping addresses

payments collection:
- _paymentMethod_apple pay_ - total number of Apple Pay payment methods
- _paymentMethod_bitcoin_ - total number of bitcoin payment methods
- _paymentMethod_card_ - total number of card payment methods
- _paymentMethod_paypal_ - total number of PayPal payment methods
- _paymentMethod_False_ - total number of payment methods with registration failure equal to False
- _paymentMethod_True_ - total number of payment methods with registration failure equal to True
- _paymentMethod_all_ - total number of payment methods
- _unique_paymentMethodIssuer_ - total number of unique payment method issuers
- _unique_paymentMethodProvider_ - total number of unique payment method providers

transactions set:
- _transactions_False_ - total number of transactions with transactionFailed equal to False
- _transactions_True_ - total number of transactions with transactionFailed equal to True
- _transactions_value_sum_ - total value of transactions
- _transaction_all_ - total number of transactions
- _transactionAmountCategory_1_ - total number of transactions with an amount between 0 and 33
- _transactionAmountCategory_2_ - total number of transactions with an amount between 33 and 66
- _transactionAmountCategory_3_ - total number of transactions with an amount above 66

Creating the model

```
labels = np.array(features_df_clean['fraudulent'])
features = features_df_clean.loc[:, 'strange_customerEmail':'transactionAmountCategory_3']
feature_list = list(features.columns)
features = np.array(features)

# Split the data into training and testing sets
train_x, test_x, train_y, test_y = train_test_split(features, labels, test_size = 0.25, random_state = 23)

rf = RandomForestClassifier(n_estimators = 1000, random_state = 23)
rf.fit(train_x, train_y)
predictions = rf.predict(test_x)

print('Model accuracy on training set: {}'.format(accuracy_score(train_y, rf.predict(train_x))))
print('Model accuracy on test set: {}'.format(accuracy_score(test_y, predictions)))

ax = plt.subplot()
sns.heatmap(confusion_matrix(test_y, predictions), annot=True, ax = ax)
ax.set_xlabel('Predicted categories')
ax.set_ylabel('Real categories')
ax.set_title('Error matrix (test file)\n')
ax.xaxis.set_ticklabels(['Not-fraud', 'Fraud'])
ax.yaxis.set_ticklabels(['Not-fraud', 'Fraud'])

importances = list(rf.feature_importances_)
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(feature_list, importances)]
importances_df = pd.DataFrame(feature_importances).rename(columns={0: 'variable', 1: 'importance'}).sort_values('importance', ascending=True).set_index('variable')

ax = importances_df.plot.barh(figsize=(8,10), title='The degree of contribution of individual variables in the model\n')
ax.set_xlabel('Contribution rate')
ax.set_ylabel('Model Variable')
ax.get_legend().remove()
for p in ax.patches:
    ax.annotate("%.2f" % p.get_width(), (p.get_x() + p.get_width(), p.get_y()-0.5), xytext=(5, 10), textcoords='offset points')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
```

Comment on the chart

Most of the variables that contribute significantly to the classification model are based on data about transactions, orders or payment methods:

- _orders_value_sum_ - total value of orders
- _paymentMethod_all_ - total number of payment methods
- _paymentMethod_False_ - total number of payment methods with registration failure equal to False
- _transactions_value_sum_ - total value of transactions
- _unique_paymentMethodIssuer_ - total number of unique payment method issuers
- _unique_paymentMethodProvider_ - total number of unique payment method providers

Far fewer of the significant variables describe the customer himself:

- _repeated_customerEmail_ - whether the e-mail assigned to this customer was also assigned to other customers

On this basis, it can be assumed that catching fraudulent transactions should generally rely more on searching for financial anomalies than on describing the characteristics of the customer. It is also worth noting that the variable _strange_paymentMethodIssuer_ also contributes to the model; it comes directly out of the exploratory analysis carried out earlier (one-letter entries in the issuer field already looked suspicious at that stage). Another variable created at a similar stage of the analysis (_strange_customerEmail_, an e-mail address provided in an incorrect form) does not contribute as significantly to the model.
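A hypothetical follow-up, not part of the original analysis: impurity-based importances from a random forest can overstate features with many distinct values, so it can be worth cross-checking the ranking above with permutation importance on the test split. The sketch below assumes scikit-learn 0.22 or newer and reuses `rf`, `test_x`, `test_y` and `feature_list` from the cells above.

```
from sklearn.inspection import permutation_importance
import pandas as pd

# Shuffle each feature on the held-out split and measure how much the score drops.
perm = permutation_importance(rf, test_x, test_y, n_repeats=10, random_state=23, n_jobs=-1)

perm_df = (pd.DataFrame({'variable': feature_list,
                         'importance_mean': perm.importances_mean,
                         'importance_std': perm.importances_std})
           .sort_values('importance_mean', ascending=False))
perm_df.head(10)
```

If the two rankings broadly agree, the conclusion above about financial variables dominating becomes more convincing.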
# SQL and Pandas DataFrames

Google Cloud Datalab allows you to use SQL to interact with Google BigQuery. However, SQL is just the starting point. Datalab enables you to combine the power of declarative SQL with imperative code (Python) to perform interesting data analysis, visualization, and transformation tasks.

You can use a number of Python data analysis, data wrangling, and visualization libraries, such as `numpy`, `pandas`, `matplotlib`, and many others. Several of these libraries build on top of a `DataFrame` object. This notebook shows how to easily integrate these capabilities together in a single notebook.

This functionality is provided by the BigQuery APIs, so the first step is, of course, to import the module along with pandas.

```
import google.datalab.bigquery as bq
import pandas as pd
```

# From SQL to DataFrames

In the [BigQuery APIs notebook](https://8081-dot-12652225-dot-devshell.appspot.com/notebooks/datalab/docs/tutorials/BigQuery/BigQuery%20APIs.ipynb), we've covered how to define a SQL query, execute it, and get a reference to the results in the form of a `DataFrame`. Let's start with a query to see what more we can do.

```
%%bq query -n requests
SELECT timestamp, latency, endpoint
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = 'Popular' OR endpoint = 'Recent'

%%bq sample --count 5 --query requests

df = requests.execute(output_options=bq.QueryOutput.dataframe()).result()
len(df)
```

## Data Manipulation

The resulting `DataFrame` can be further transformed, sliced, projected, and inspected using the extensive set of APIs available on the class. Let's print out the first five rows.

```
df.head(5)
```

Or it can be inspected for schema,

```
df.dtypes
```

or further transformed locally, for example to perform grouping,

```
groups = df.groupby('endpoint')
groups.dtypes

for name, df_group in groups:
    print('%s - %d items' % (name, len(df_group)))
    print(df_group.head(3))
    print()
```

and then analyze a dimension per group,

```
groups['latency'].describe()
```

or even run a set of custom aggregation functions.

```
def unique(x):
    return sorted(set(list(x)))

groups['latency'].agg({ 'list': lambda x: list(x), 'unique': lambda x: unique(x) })
```

## Data Visualization

DataFrame provides built-in visualization capabilities using `matplotlib`.

**Conversion to TimeSeries Data**

Our DataFrame instance contains timestamps, latencies, and endpoints. Let's reshape this DataFrame so that each endpoint is a column, that is, an independent series. The timestamp itself will be the index. For timestamps at which one of the endpoints has no data point, we'll fill in the next available value (a backfill).

```
df_series = df.pivot(index='timestamp', columns='endpoint', values='latency').fillna(method = 'backfill')
df_series[10:20]
len(df_series)
df_series.plot(logy = True)
```

**Resampling**

The plot above is not meaningful. Our time-series data is taken at arbitrary timestamps, whenever a request was processed. It is irregularly spaced, and there are a large number of data points. DataFrames provide the ability to resample a time-series into more meaningful time windows.

```
df_series.resample(rule='10min').mean().plot(logy = True)
```

# Looking Ahead

Python pandas provide an extensive toolbox of capabilities for working with data. Datalab combines the power of BigQuery and DataFrames. Subsequent notebooks cover additional SQL capabilities that allow you to use the full power of BigQuery, running queries close to your entire dataset before bringing a useful subset of data into the notebook.
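The "Looking Ahead" note above is worth making concrete: aggregations that only need a small summary can be pushed into BigQuery itself instead of being computed in pandas. The cell below is a sketch of that idea, not part of the original notebook; it assumes the same sample table is available and that `bq.Query` accepts a standard-SQL string, matching how `requests` is executed above. `APPROX_QUANTILES(latency, 100)[OFFSET(50)]` is used as an approximate median.

```
import google.datalab.bigquery as bq

# Compute the per-endpoint summary inside BigQuery and bring back only a tiny result set.
summary_sql = """
SELECT endpoint,
       COUNT(*) AS requests,
       AVG(latency) AS avg_latency,
       APPROX_QUANTILES(latency, 100)[OFFSET(50)] AS approx_median_latency
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = 'Popular' OR endpoint = 'Recent'
GROUP BY endpoint
"""

df_summary = bq.Query(summary_sql).execute(output_options=bq.QueryOutput.dataframe()).result()
df_summary
```

The result is two rows rather than the full log, which is the point: the heavy lifting stays next to the data.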
# Inference and Validation Now that you have a trained network, you can use it for making predictions. This is typically called **inference**, a term borrowed from statistics. However, neural networks have a tendency to perform *too well* on the training data and aren't able to generalize to data that hasn't been seen before. This is called **overfitting** and it impairs inference performance. To test for overfitting while training, we measure the performance on data not in the training set called the **validation** set. We avoid overfitting through regularization such as dropout while monitoring the validation performance during training. In this notebook, I'll show you how to do this in PyTorch. As usual, let's start by loading the dataset through torchvision. You'll learn more about torchvision and loading data in a later part. This time we'll be taking advantage of the test set which you can get by setting `train=False` here: ```python testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) ``` The test set contains images just like the training set. Typically you'll see 10-20% of the original dataset held out for testing and validation with the rest being used for training. ``` import torch from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here I'll create a model like normal, using the same one from my solution for part 4. ``` from torch import nn, optim import torch.nn.functional as F class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = F.relu(self.fc3(x)) x = F.log_softmax(self.fc4(x), dim=1) return x ``` The goal of validation is to measure the model's performance on data that isn't part of the training set. Performance here is up to the developer to define though. Typically this is just accuracy, the percentage of classes the network predicted correctly. Other options are [precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context)) and top-5 error rate. We'll focus on accuracy here. First I'll do a forward pass with one batch from the test set. ``` model = Classifier() images, labels = next(iter(testloader)) # Get the class probabilities ps = torch.exp(model(images)) # Make sure the shape is appropriate, we should get 10 class probabilities for 64 examples print(ps.shape) ``` With the probabilities, we can get the most likely class using the `ps.topk` method. This returns the $k$ highest values. Since we just want the most likely class, we can use `ps.topk(1)`. This returns a tuple of the top-$k$ values and the top-$k$ indices. If the highest value is the fifth element, we'll get back 4 as the index. 
``` top_p, top_class = ps.topk(1, dim=1) # Look at the most likely classes for the first 10 examples print(top_class[:10,:]) ``` Now we can check if the predicted classes match the labels. This is simple to do by equating `top_class` and `labels`, but we have to be careful of the shapes. Here `top_class` is a 2D tensor with shape `(64, 1)` while `labels` is 1D with shape `(64)`. To get the equality to work out the way we want, `top_class` and `labels` must have the same shape. If we do ```python equals = top_class == labels ``` `equals` will have shape `(64, 64)`, try it yourself. What it's doing is comparing the one element in each row of `top_class` with each element in `labels` which returns 64 True/False boolean values for each row. ``` equals = top_class == labels.view(*top_class.shape) ``` Now we need to calculate the percentage of correct predictions. `equals` has binary values, either 0 or 1. This means that if we just sum up all the values and divide by the number of values, we get the percentage of correct predictions. This is the same operation as taking the mean, so we can get the accuracy with a call to `torch.mean`. If only it was that simple. If you try `torch.mean(equals)`, you'll get an error ``` RuntimeError: mean is not implemented for type torch.ByteTensor ``` This happens because `equals` has type `torch.ByteTensor` but `torch.mean` isn't implemented for tensors with that type. So we'll need to convert `equals` to a float tensor. Note that when we take `torch.mean` it returns a scalar tensor, to get the actual value as a float we'll need to do `accuracy.item()`. ``` accuracy = torch.mean(equals.type(torch.FloatTensor)) print(f'Accuracy: {accuracy.item()*100}%') ``` The network is untrained so it's making random guesses and we should see an accuracy around 10%. Now let's train our network and include our validation pass so we can measure how well the network is performing on the test set. Since we're not updating our parameters in the validation pass, we can speed up our code by turning off gradients using `torch.no_grad()`: ```python # turn off gradients with torch.no_grad(): # validation pass here for images, labels in testloader: ... ``` >**Exercise:** Implement the validation loop below and print out the total accuracy after the loop. You can largely copy and paste the code from above, but I suggest typing it in because writing it out yourself is essential for building the skill. In general you'll always learn more by typing it rather than copy-pasting. You should be able to get an accuracy above 80%. 
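Before writing the full validation loop, it can help to verify the shape behaviour described above in isolation. The throwaway cell below is not part of the original notebook; it uses random dummy tensors purely to show why the `view` call matters.

```
import torch

# Dummy stand-ins with the same shapes as top_class (64, 1) and labels (64,)
dummy_top_class = torch.randint(0, 10, (64, 1))
dummy_labels = torch.randint(0, 10, (64,))

# Without reshaping, broadcasting compares every row against every label: shape (64, 64)
print((dummy_top_class == dummy_labels).shape)

# Reshaping labels to (64, 1) gives the element-wise comparison we actually want
print((dummy_top_class == dummy_labels.view(*dummy_top_class.shape)).shape)
```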
``` model = Classifier() criterion = nn.NLLLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) epochs = 30 steps = 0 train_losses, test_losses = [], [] for e in range(epochs): running_loss = 0 for images, labels in trainloader: optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: ## Implement the validation pass and print out the validation accuracy # Load next images and labels in the text loader images, labels = next(iter(testloader)) # Return the probabilities for the images model ps = torch.exp(model(images)) # Return top % and class - using topk top_p, top_class = ps.topk(1, dim=1) # Check where the top class is equal to the label - creates binary values - 0 or 1 # Returns 64 true/false boolean values equals = top_class == labels.view(*top_class.shape) # Accuracy is the mean of the correct values # Convert to FLoatTensor first before returning the mean accuracy = torch.mean(equals.type(torch.FloatTensor)) print(f'Accuracy: {accuracy.item()*100}%') ``` ## Overfitting If we look at the training and validation losses as we train the network, we can see a phenomenon known as overfitting. <img src='assets/overfitting.png' width=450px> The network learns the training set better and better, resulting in lower training losses. However, it starts having problems generalizing to data outside the training set leading to the validation loss increasing. The ultimate goal of any deep learning model is to make predictions on new data, so we should strive to get the lowest validation loss possible. One option is to use the version of the model with the lowest validation loss, here the one around 8-10 training epochs. This strategy is called *early-stopping*. In practice, you'd save the model frequently as you're training then later choose the model with the lowest validation loss. The most common method to reduce overfitting (outside of early-stopping) is *dropout*, where we randomly drop input units. This forces the network to share information between weights, increasing it's ability to generalize to new data. Adding dropout in PyTorch is straightforward using the [`nn.Dropout`](https://pytorch.org/docs/stable/nn.html#torch.nn.Dropout) module. ```python class Classifier(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(784, 256) self.fc2 = nn.Linear(256, 128) self.fc3 = nn.Linear(128, 64) self.fc4 = nn.Linear(64, 10) # Dropout module with 0.2 drop probability self.dropout = nn.Dropout(p=0.2) def forward(self, x): # make sure input tensor is flattened x = x.view(x.shape[0], -1) # Now with dropout x = self.dropout(F.relu(self.fc1(x))) x = self.dropout(F.relu(self.fc2(x))) x = self.dropout(F.relu(self.fc3(x))) # output so no dropout here x = F.log_softmax(self.fc4(x), dim=1) return x ``` During training we want to use dropout to prevent overfitting, but during inference we want to use the entire network. So, we need to turn off dropout during validation, testing, and whenever we're using the network to make predictions. To do this, you use `model.eval()`. This sets the model to evaluation mode where the dropout probability is 0. You can turn dropout back on by setting the model to train mode with `model.train()`. In general, the pattern for the validation loop will look like this, where you turn off gradients, set the model to evaluation mode, calculate the validation loss and metric, then set the model back to train mode. 
```python
# turn off gradients
with torch.no_grad():

    # set model to evaluation mode
    model.eval()

    # validation pass here
    for images, labels in testloader:
        ...

# set model back to train mode
model.train()
```

> **Exercise:** Add dropout to your model and train it on Fashion-MNIST again. See if you can get a lower validation loss or higher accuracy.

```
## Define your model with dropout added
from torch import nn, optim
import torch.nn.functional as F

class Classifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 10)

        # Dropout module with 0.2 drop probability
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # make sure input tensor is flattened
        x = x.view(x.shape[0], -1)

        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))

        # output layer, so no dropout here
        x = F.log_softmax(self.fc4(x), dim=1)

        return x

model = Classifier()
criterion = nn.NLLLoss()
optimiser = optim.Adam(model.parameters(), lr=0.003)

epochs = 10
steps = 0

train_losses, test_losses = [], []
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        optimiser.zero_grad()

        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimiser.step()

        running_loss += loss.item()

    # After the inner for loop has completed
    else:
        test_loss = 0
        accuracy = 0

        # Turn off gradients for validation, saves memory and computation
        with torch.no_grad():
            # Set model to eval - no dropout
            model.eval()
            for images, labels in testloader:
                log_ps = model(images)
                test_loss += criterion(log_ps, labels)

                ps = torch.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))

        # Set model back to train mode
        model.train()

        train_losses.append(running_loss/len(trainloader))
        test_losses.append(test_loss/len(testloader))

        print(f'Epoch: {e + 1} out of {epochs}')
        print(f'Training loss: {running_loss/len(trainloader):.3f}')
        print(f'Test Loss: {test_loss/len(testloader):.3f}')
        print(f'Test Accuracy: {accuracy/len(testloader):.3f}')

%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt

plt.plot(train_losses, label='Training Loss')
plt.plot(test_losses, label='Validation Loss')
plt.legend(frameon=False)
```

## Inference

Now that the model is trained, we can use it for inference. We've done this before, but now we need to remember to set the model in inference mode with `model.eval()`. You'll also want to turn off autograd with the `torch.no_grad()` context.

```
# Import helper module (should be in the repo)
import helper

# Test out your network!

model.eval()

dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0]
# Convert 2D image to 1D vector
img = img.view(1, 784)

# Calculate the class probabilities (softmax) for img
with torch.no_grad():
    output = model.forward(img)

ps = torch.exp(output)

# Plot the image and probabilities
helper.view_classify(img.view(1, 28, 28), ps, version='Fashion')
```

## Next Up!

In the next part, I'll show you how to save your trained models. In general, you won't want to train a model every time you need it. Instead, you'll train once, save it, then load the model when you want to train more or use it for inference.
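The overfitting discussion above suggests early stopping: keep checkpoints while training and use the weights from the epoch with the lowest validation loss. Below is a minimal sketch of that pattern; it is not part of the original notebook, and the checkpoint file name is just a placeholder.

```
import torch

best_val_loss = float('inf')

def checkpoint_if_better(model, val_loss, path='best_model_checkpoint.pt'):
    """Save the model's weights whenever the validation loss improves."""
    global best_val_loss
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        torch.save(model.state_dict(), path)

# Inside the epoch loop, right after computing the epoch's validation loss:
#     checkpoint_if_better(model, (test_loss/len(testloader)).item())
#
# After training, reload the weights with the lowest validation loss:
#     model.load_state_dict(torch.load('best_model_checkpoint.pt'))
```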
# Download all data from synapse and save into pickle

```
import synapseclient
import synapseutils

syn = synapseclient.Synapse()
syn.login('adnama547', 'password')  ### do not publish password!!!

# download dicoms into 'files' - takes ~30-40 minutes; couldn't figure out how to suppress warnings
#files = synapseutils.syncFromSynapse(syn, 'syn20608511')

import pickle

filename = 'all_data'
outfile = open(filename, 'wb')
pickle.dump(files, outfile)
outfile.close()

print(len(files))
print(files[20])

dicoms = [x for x in files if 'dcm' in x.path]
len(dicoms)

stirs = [x for x in dicoms if 'tir' in x.experimentalCondition[0]]
t1s = [x for x in dicoms if 'tse' in x.experimentalCondition[0]]
print(len(stirs), len(t1s))  # 052 doesn't have T1
```

## Examine images

```
import skimage as sk
from matplotlib import pyplot as plt
import pydicom

test_img = pydicom.dcmread(files[10].path)
plt.imshow(test_img.pixel_array.T)

seg_zip = files[1998].path

import zipfile
archive = zipfile.ZipFile(seg_zip, 'r')
archive.extractall(path='/home/jupyter')

import nibabel
seg = nibabel.load('/home/jupyter/segmentation_50cases/segmentation-010.nii.gz')
seg

img = seg.get_data()
print(seg.affine)
print(img.shape)
plt.imshow(img[:,:,10])
```

## Place dicoms into folders, create niftis, and then it should work with Michael's code

```
# exclude 32, 36 and 52
patients = ['107', '053', '023', '010', '102', '123', '097', '087', '048', '089', '222', '077',
            '065', '106', '098', '158', '035', '081', '127', '119', '120', '268', '090', '085',
            '224', '144', '114', '225', '063', '099', '279', '019', '045', '088', '196', '095',
            '227', '136', '069', '109', '076', '046', '061', '072', '027', '138', '047']
patients.sort()

import os
from shutil import copyfile
import dcmstack
from glob import glob

prefix = '/home/jupyter/WBMRI'

for this_patient in patients:
    folder_path = prefix + this_patient

    # make directory (uncomment on the first run)
    #os.mkdir(folder_path)

    # get file paths for that patient
    stir_file_paths = [x.path for x in stirs if this_patient in x.individualID[0]]
    stir_file_names = [x.files[0] for x in stirs if this_patient in x.individualID[0]]
    t1_file_paths = [x.path for x in t1s if this_patient in x.individualID[0]]
    t1_file_names = [x.files[0] for x in t1s if this_patient in x.individualID[0]]

    # copy dicoms into directory
    for i in range(0, len(stir_file_paths)):
        copyfile(stir_file_paths[i], folder_path + '/' + stir_file_names[i])

    src_dcms = glob(os.path.join(folder_path, '*.dcm'))
    stacks = dcmstack.parse_and_stack(src_dcms)
    # parse_and_stack returns a dict keyed by series, so take the first stack
    stack = list(stacks.values())[0]
    nii = stack.to_nifti()
    nii.to_filename(this_patient + '_stir.nii.gz')

stir_file_paths = [x.path for x in stirs if '010' in x.individualID[0]]
t1_file_paths = [x.path for x in t1s if '010' in x.individualID[0]]

folder_path = prefix + this_patient
src_dcms = glob(os.path.join(folder_path, '*.dcm'))
src_dcms

stacks = dcmstack.parse_and_stack(src_dcms)
list(stacks.values())[0]
stacks

import shutil

pip install dcmstack
```
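One cleanup worth making before this notebook is shared: the login cell above hard-codes a username and password. A safer pattern is to pull credentials from the environment, or prompt for them, as in the hypothetical sketch below; the environment variable names are placeholders, not something the original notebook defines.

```
import os
import getpass
import synapseclient

syn = synapseclient.Synapse()

# Read credentials from the environment if present, otherwise prompt for them.
username = os.environ.get('SYNAPSE_USERNAME') or input('Synapse username: ')
password = os.environ.get('SYNAPSE_PASSWORD') or getpass.getpass('Synapse password: ')

syn.login(username, password)
```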
# Predicting Boston Housing Prices ## Updating a model using SageMaker _Deep Learning Nanodegree Program | Deployment_ --- In this notebook, we will continue working with the [Boston Housing Dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). Our goal in this notebook will be to train two different models and to use SageMaker to switch a deployed endpoint from using one model to the other. One of the benefits of using SageMaker to do this is that we can make the change without interrupting service. What this means is that we can continue sending data to the endpoint and at no point will that endpoint disappear. ## General Outline Typically, when using a notebook instance with SageMaker, you will proceed through the following steps. Of course, not every step will need to be done with each project. Also, there is quite a lot of room for variation in many of the steps, as you will see throughout these lessons. 1. Download or otherwise retrieve the data. 2. Process / Prepare the data. 3. Upload the processed data to S3. 4. Train a chosen model. 5. Test the trained model (typically using a batch transform job). 6. Deploy the trained model. 7. Use the deployed model. In this notebook we will be skipping step 5, testing the model. In addition, we will perform steps 4, 6 and 7 multiple times with different models. ## Step 0: Setting up the notebook We begin by setting up all of the necessary bits required to run our notebook. To start that means loading all of the Python modules we will need. ``` %matplotlib inline import os import numpy as np import pandas as pd from pprint import pprint import matplotlib.pyplot as plt from time import gmtime, strftime from sklearn.datasets import load_boston import sklearn.model_selection ``` In addition to the modules above, we need to import the various bits of SageMaker that we will be using. ``` import sagemaker from sagemaker import get_execution_role from sagemaker.amazon.amazon_estimator import get_image_uri from sagemaker.predictor import csv_serializer # This is an object that represents the SageMaker session that we are currently operating in. This # object contains some useful information that we will need to access later such as our region. session = sagemaker.Session() # This is an object that represents the IAM role that we are currently assigned. When we construct # and launch the training job later we will need to tell it what IAM role it should have. Since our # use case is relatively simple we will simply assign the training job the role we currently have. role = get_execution_role() ``` ## Step 1: Downloading the data Fortunately, this dataset can be retrieved using sklearn and so this step is relatively straightforward. ``` boston = load_boston() ``` ## Step 2: Preparing and splitting the data Given that this is clean tabular data, we don't need to do any processing. However, we do need to split the rows in the dataset up into train, test and validation sets. ``` # First we package up the input data and the target variable (the median value) as pandas dataframes. This # will make saving the data to a file a little easier later on. X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names) Y_bos_pd = pd.DataFrame(boston.target) # We split the dataset into 2/3 training and 1/3 testing sets. X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33) # Then we split the training set further into 2/3 training and 1/3 validation sets. 
X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33) ``` ## Step 3: Uploading the training and validation files to S3 When a training job is constructed using SageMaker, a container is executed which performs the training operation. This container is given access to data that is stored in S3. This means that we need to upload the data we want to use for training to S3. We can use the SageMaker API to do this and hide some of the details. ### Save the data locally First we need to create the train and validation csv files which we will then upload to S3. ``` # This is our local data directory. We need to make sure that it exists. data_dir = '../data/boston' if not os.path.exists(data_dir): os.makedirs(data_dir) # We use pandas to save our train and validation data to csv files. Note that we make sure not to include header # information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed # that the first entry in each row is the target variable. pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) ``` ### Upload to S3 Since we are currently running inside of a SageMaker session, we can use the object which represents this session to upload our data to the 'default' S3 bucket. Note that it is good practice to provide a custom prefix (essentially an S3 folder) to make sure that you don't accidentally interfere with data uploaded from some other notebook or project. ``` prefix = 'boston-update-endpoints' val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) ``` ## Step 4 (A): Train the XGBoost model Now that we have the training and validation data uploaded to S3, we can construct our XGBoost model and train it. We will be making use of the high level SageMaker API to do this which will make the resulting code a little easier to read at the cost of some flexibility. To construct an estimator, the object which we wish to train, we need to provide the location of a container which contains the training code. Since we are using a built in algorithm this container is provided by Amazon. However, the full name of the container is a bit lengthy and depends on the region that we are operating in. Fortunately, SageMaker provides a useful utility method called `get_image_uri` that constructs the image name for us. To use the `get_image_uri` method we need to provide it with our current region, which can be obtained from the session object, and the name of the algorithm we wish to use. In this notebook we will be using XGBoost however you could try another algorithm if you wish. The list of built in algorithms can be found in the list of [Common Parameters](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). ``` # As stated above, we use this utility method to construct the image name for the training container. xgb_container = get_image_uri(session.boto_region_name, 'xgboost') # Now that we know which container to use, we can construct the estimator object. 
xgb = sagemaker.estimator.Estimator(xgb_container, # The name of the training container role, # The IAM role to use (our current role in this case) train_instance_count=1, # The number of instances to use for training train_instance_type='ml.m4.xlarge', # The type of instance to use for training output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), # Where to save the output (the model artifacts) sagemaker_session=session) # The current SageMaker session ``` Before asking SageMaker to begin the training job, we should probably set any model-specific hyperparameters. There are quite a few that can be set when using the XGBoost algorithm; below are just a few of them. If you would like to change the hyperparameters below or modify additional ones you can find more information on the [XGBoost hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html) ``` xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, objective='reg:linear', early_stopping_rounds=10, num_round=200) ``` Now that we have our estimator object completely set up, it is time to train it. To do this we make sure that SageMaker knows our input data is in csv format and then execute the `fit` method. ``` # This is a wrapper around the location of our train and validation data, to make sure that SageMaker # knows our data is in csv format. s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='text/csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='text/csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` ## Step 5: Test the trained model We will be skipping this step for now. ## Step 6 (A): Deploy the trained model Even though we used the high level approach to construct and train the XGBoost model, we will be using the lower level approach to deploy it. One of the reasons for this is so that we have additional control over how the endpoint is constructed. This will become a little clearer later on when we construct more advanced endpoints. ### Build the model Of course, before we can deploy the model, we need to first create it. The `fit` method that we used earlier created some model artifacts and we can use these to construct a model object. ``` xgb.model_data # Remember that a model needs to have a unique name xgb_model_name = "boston-update-xgboost-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # We also need to tell SageMaker which container should be used for inference and where it should # retrieve the model artifacts from. In our case, the xgboost container that we used for training # can also be used for inference and the model artifacts come from the previous call to fit. xgb_primary_container = { "Image": xgb_container, "ModelDataUrl": xgb.model_data } # And lastly we construct the SageMaker model xgb_model_info = session.sagemaker_client.create_model( ModelName = xgb_model_name, ExecutionRoleArn = role, PrimaryContainer = xgb_primary_container) ``` ### Create the endpoint configuration Once we have a model we can start putting together the endpoint. Recall that to do this we need to first create an endpoint configuration, essentially the blueprint that SageMaker will use to build the endpoint itself.
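As an optional aside (not part of the original workflow), once the configuration in the next cell exists we could ask SageMaker to echo the blueprint back to us. This is only a sketch: it assumes the boto3 client methods `describe_endpoint_config` and `list_endpoint_configs` behave the way I recall, and it should be run after the next cell has created the configuration.

```
# Optional sanity check; run this only after the configuration below has been created.
# describe_endpoint_config should echo back the production variants we asked for.
config_desc = session.sagemaker_client.describe_endpoint_config(
    EndpointConfigName = xgb_endpoint_config_name)
pprint(config_desc['ProductionVariants'])

# list_endpoint_configs lets us spot name collisions left over from earlier runs of this notebook.
existing = session.sagemaker_client.list_endpoint_configs(NameContains = 'boston-update')
pprint([cfg['EndpointConfigName'] for cfg in existing['EndpointConfigs']])
```

Seeing the `ProductionVariants` list come back exactly as we specified it is a quick way to confirm the blueprint before any endpoint is built from it.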
``` # As before, we need to give our endpoint configuration a name which should be unique xgb_endpoint_config_name = "boston-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = xgb_endpoint_config_name, ProductionVariants = [{ "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": xgb_model_name, "VariantName": "XGB-Model" }]) ``` ### Deploy the endpoint Now that the endpoint configuration has been created, we can ask SageMaker to build our endpoint. **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it! ``` # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = xgb_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) ``` ## Step 7 (A): Use the model Now that our model is trained and deployed we can send some test data to it and evaluate the results. ``` response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) result = response['Body'].read().decode("utf-8") pprint(result) Y_test.values[0] ``` ## Shut down the endpoint Now that we know that the XGBoost endpoint works, we can shut it down. We will make use of it again later. ``` session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) ``` ## Step 4 (B): Train the Linear model Suppose we are working in an environment where the XGBoost model that we trained earlier is becoming too costly. Perhaps the number of calls to our endpoint has increased and the length of time it takes to perform inference with the XGBoost model is becoming problematic. A possible solution might be to train a simpler model to see if it performs nearly as well. In our case, we will construct a linear model. The process of doing this is the same as for creating the XGBoost model that we created earlier, although there are different hyperparameters that we need to set. ``` # Similar to the XGBoost model, we will use the utility method to construct the image name for the training container. linear_container = get_image_uri(session.boto_region_name, 'linear-learner') # Now that we know which container to use, we can construct the estimator object. linear = sagemaker.estimator.Estimator(linear_container, # The name of the training container role, # The IAM role to use (our current role in this case) train_instance_count=1, # The number of instances to use for training train_instance_type='ml.m4.xlarge', # The type of instance to use for training output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), # Where to save the output (the model artifacts) sagemaker_session=session) # The current SageMaker session ``` Before asking SageMaker to train our model, we need to set some hyperparameters. In this case we will be using a linear model so the number of hyperparameters we need to set is much smaller.
For more details see the [Linear model hyperparameter page](https://docs.aws.amazon.com/sagemaker/latest/dg/ll_hyperparameters.html) ``` linear.set_hyperparameters(feature_dim=13, # Our data has 13 feature columns predictor_type='regressor', # We wish to create a regression model mini_batch_size=200) # Here we set how many samples to look at in each iteration ``` Now that the hyperparameters have been set, we can ask SageMaker to fit the linear model to our data. ``` linear.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` ## Step 6 (B): Deploy the trained model Similar to the XGBoost model, now that we've fit the model we need to deploy it. Also like the XGBoost model, we will use the lower level approach so that we have more control over the endpoint that gets created. ### Build the model Of course, before we can deploy the model, we need to first create it. The `fit` method that we used earlier created some model artifacts and we can use these to construct a model object. ``` # First, we create a unique model name linear_model_name = "boston-update-linear-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # We also need to tell SageMaker which container should be used for inference and where it should # retrieve the model artifacts from. In our case, the linear-learner container that we used for training # can also be used for inference. linear_primary_container = { "Image": linear_container, "ModelDataUrl": linear.model_data } # And lastly we construct the SageMaker model linear_model_info = session.sagemaker_client.create_model( ModelName = linear_model_name, ExecutionRoleArn = role, PrimaryContainer = linear_primary_container) ``` ### Create the endpoint configuration Once we have the model we can start putting together the endpoint by creating an endpoint configuration. ``` # As before, we need to give our endpoint configuration a name which should be unique linear_endpoint_config_name = "boston-linear-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration linear_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = linear_endpoint_config_name, ProductionVariants = [{ "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": linear_model_name, "VariantName": "Linear-Model" }]) ``` ### Deploy the endpoint Now that the endpoint configuration has been created, we can ask SageMaker to build our endpoint. **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it! ``` # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = linear_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) ``` ## Step 7 (B): Use the model Just like with the XGBoost model, we will send some data to our endpoint to make sure that it is working properly. An important note is that the output format for the linear model is different from the XGBoost model. 
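To make that difference concrete: the XGBoost container hands back its predictions as a plain comma-separated string, while the linear-learner container (as far as I recall) returns a small JSON document with a `predictions` list. The helper below is only a sketch, and the JSON field names are assumptions to be checked against the raw `result` printed in the next cell.

```
import json

# Hypothetical helper (not from the original notebook): decode an inference response
# body depending on which built-in algorithm produced it.
def decode_predictions(body_text, algorithm):
    if algorithm == 'xgboost':
        # The XGBoost container returns e.g. "21.34" or "21.34,19.8" for several rows.
        return [float(v) for v in body_text.strip().split(',')]
    if algorithm == 'linear-learner':
        # Assumed layout: {"predictions": [{"score": <float>}, ...]} -- verify against the output below.
        payload = json.loads(body_text)
        return [p['score'] for p in payload['predictions']]
    raise ValueError('unknown algorithm: ' + algorithm)
```

With a helper like this, downstream evaluation code does not need to care which of the two containers happens to be behind the endpoint.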
``` response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) result = response['Body'].read().decode("utf-8") pprint(result) Y_test.values[0] ``` ## Shut down the endpoint Now that we know that the Linear model's endpoint works, we can shut it down. ``` session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) ``` ## Step 6 (C): Deploy a combined model So far we've constructed two separate models which we could deploy and use. Before we talk about how we can change a deployed endpoint from one configuration to another, let's consider a slightly different situation. Suppose that before we switch from using only the XGBoost model to only the Linear model, we first want to do something like an A-B test, where we send some of the incoming data to the XGBoost model and some of the data to the Linear model. Fortunately, SageMaker provides this functionality. And to actually get SageMaker to do this for us is not too different from deploying a model in the way that we've already done. The only difference is that we need to list more than one model in the production variants parameter of the endpoint configuration. A reasonable question to ask is, how much data is sent to each of the models that I list in the production variants parameter? The answer is that it depends on the weight set for each model. Suppose that we have $k$ models listed in the production variants and that each model $i$ is assigned the weight $w_i$. Then each model $i$ will receive $w_i / W$ of the traffic where $W = \sum_{i} w_i$. In our case, since we have two models, the linear model and the XGBoost model, and each model has weight 1, we see that each model will get 1 / (1 + 1) = 1/2 of the data sent to the endpoint. ``` # As before, we need to give our endpoint configuration a name which should be unique combined_endpoint_config_name = "boston-combined-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration combined_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = combined_endpoint_config_name, ProductionVariants = [ { # First we include the linear model "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": linear_model_name, "VariantName": "Linear-Model" }, { # And next we include the xgb model "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": xgb_model_name, "VariantName": "XGB-Model" }]) ``` Now that we've created the endpoint configuration, we can ask SageMaker to construct the endpoint. **Note:** This is a friendly (repeated) reminder that you are about to deploy an endpoint. Make sure that you shut it down once you've finished with it! ``` # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = combined_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) ``` ## Step 7 (C): Use the model Now that we've constructed an endpoint which sends data to both the XGBoost model and the linear model we can send some data to the endpoint and see what sort of results we get back. 
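It is also handy to know which of the two variants actually answered each request. If memory serves, the boto3 response from `invoke_endpoint` carries an `InvokedProductionVariant` field for exactly this purpose; the sketch below (treat that field name as an assumption) tallies it over twenty test rows, where equal weights should give a split somewhere near 50/50.

```
from collections import Counter

# Tally which production variant served each of the first 20 test rows.
# 'InvokedProductionVariant' is assumed to be the key boto3 uses for the variant name.
variant_counts = Counter()
for rec in range(20):
    resp = session.sagemaker_runtime_client.invoke_endpoint(
        EndpointName = endpoint_name,
        ContentType = 'text/csv',
        Body = ','.join(map(str, X_test.values[rec])))
    variant_counts[resp.get('InvokedProductionVariant', 'unknown')] += 1

print(variant_counts)
```

Because each request is routed randomly, a small sample like this can deviate noticeably from the nominal weights.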
``` response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) ``` Since looking at a single response doesn't give us a clear look at what is happening, we can instead take a look at a few different responses to our endpoint ``` for rec in range(10): response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[rec]))) pprint(response) result = response['Body'].read().decode("utf-8") print(result) print(Y_test.values[rec]) ``` If at some point we aren't sure about the properties of a deployed endpoint, we can use the `describe_endpoint` function to get SageMaker to return a description of the deployed endpoint. ``` pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) ``` ## Updating an Endpoint Now suppose that we've done our A-B test and the new linear model is working well enough. What we'd like to do now is to switch our endpoint from sending data to both the XGBoost model and the linear model to sending data only to the linear model. Of course, we don't really want to shut down the endpoint to do this as doing so would interrupt service to whoever depends on our endpoint. Instead, we can ask SageMaker to **update** an endpoint to a new endpoint configuration. What is actually happening is that SageMaker will set up a new endpoint with the new characteristics. Once this new endpoint is running, SageMaker will switch the old endpoint so that it now points at the newly deployed model, making sure that this happens seamlessly in the background. ``` session.sagemaker_client.update_endpoint(EndpointName=endpoint_name, EndpointConfigName=linear_endpoint_config_name) ``` To get a glimpse at what is going on, we can ask SageMaker to describe our in-use endpoint now, before the update process has completed. When we do so, we can see that the in-use endpoint still has the same characteristics it had before. ``` pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) ``` If we now wait for the update process to complete, and then ask SageMaker to describe the endpoint, it will return the characteristics of the new endpoint configuration. ``` endpoint_dec = session.wait_for_endpoint(endpoint_name) pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) ``` ## Shut down the endpoint Now that we've finished, we need to make sure to shut down the endpoint. ``` session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) ``` ## Optional: Clean up The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. ``` # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir data_dir ```
github_jupyter
%matplotlib inline import os import numpy as np import pandas as pd from pprint import pprint import matplotlib.pyplot as plt from time import gmtime, strftime from sklearn.datasets import load_boston import sklearn.model_selection import sagemaker from sagemaker import get_execution_role from sagemaker.amazon.amazon_estimator import get_image_uri from sagemaker.predictor import csv_serializer # This is an object that represents the SageMaker session that we are currently operating in. This # object contains some useful information that we will need to access later such as our region. session = sagemaker.Session() # This is an object that represents the IAM role that we are currently assigned. When we construct # and launch the training job later we will need to tell it what IAM role it should have. Since our # use case is relatively simple we will simply assign the training job the role we currently have. role = get_execution_role() boston = load_boston() # First we package up the input data and the target variable (the median value) as pandas dataframes. This # will make saving the data to a file a little easier later on. X_bos_pd = pd.DataFrame(boston.data, columns=boston.feature_names) Y_bos_pd = pd.DataFrame(boston.target) # We split the dataset into 2/3 training and 1/3 testing sets. X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X_bos_pd, Y_bos_pd, test_size=0.33) # Then we split the training set further into 2/3 training and 1/3 validation sets. X_train, X_val, Y_train, Y_val = sklearn.model_selection.train_test_split(X_train, Y_train, test_size=0.33) # This is our local data directory. We need to make sure that it exists. data_dir = '../data/boston' if not os.path.exists(data_dir): os.makedirs(data_dir) # We use pandas to save our train and validation data to csv files. Note that we make sure not to include header # information or an index as this is required by the built in algorithms provided by Amazon. Also, it is assumed # that the first entry in each row is the target variable. pd.concat([Y_val, X_val], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) pd.concat([Y_train, X_train], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) prefix = 'boston-update-endpoints' val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'), key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'), key_prefix=prefix) # As stated above, we use this utility method to construct the image name for the training container. xgb_container = get_image_uri(session.boto_region_name, 'xgboost') # Now that we know which container to use, we can construct the estimator object. xgb = sagemaker.estimator.Estimator(xgb_container, # The name of the training container role, # The IAM role to use (our current role in this case) train_instance_count=1, # The number of instances to use for training train_instance_type='ml.m4.xlarge', # The type of instance ot use for training output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), # Where to save the output (the model artifacts) sagemaker_session=session) # The current SageMaker session xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, objective='reg:linear', early_stopping_rounds=10, num_round=200) # This is a wrapper around the location of our train and validation data, to make sure that SageMaker # knows our data is in csv format. 
s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='text/csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='text/csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) xgb.model_data # Remember that a model needs to have a unique name xgb_model_name = "boston-update-xgboost-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # We also need to tell SageMaker which container should be used for inference and where it should # retrieve the model artifacts from. In our case, the xgboost container that we used for training # can also be used for inference and the model artifacts come from the previous call to fit. xgb_primary_container = { "Image": xgb_container, "ModelDataUrl": xgb.model_data } # And lastly we construct the SageMaker model xgb_model_info = session.sagemaker_client.create_model( ModelName = xgb_model_name, ExecutionRoleArn = role, PrimaryContainer = xgb_primary_container) # As before, we need to give our endpoint configuration a name which should be unique xgb_endpoint_config_name = "boston-update-xgboost-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration xgb_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = xgb_endpoint_config_name, ProductionVariants = [{ "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": xgb_model_name, "VariantName": "XGB-Model" }]) # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = xgb_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) result = response['Body'].read().decode("utf-8") pprint(result) Y_test.values[0] session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) # Similar to the XGBoost model, we will use the utility method to construct the image name for the training container. linear_container = get_image_uri(session.boto_region_name, 'linear-learner') # Now that we know which container to use, we can construct the estimator object. linear = sagemaker.estimator.Estimator(linear_container, # The name of the training container role, # The IAM role to use (our current role in this case) train_instance_count=1, # The number of instances to use for training train_instance_type='ml.m4.xlarge', # The type of instance ot use for training output_path='s3://{}/{}/output'.format(session.default_bucket(), prefix), # Where to save the output (the model artifacts) sagemaker_session=session) # The current SageMaker session linear.set_hyperparameters(feature_dim=13, # Our data has 13 feature columns predictor_type='regressor', # We wish to create a regression model mini_batch_size=200) # Here we set how many samples to look at in each iteration linear.fit({'train': s3_input_train, 'validation': s3_input_validation}) # First, we create a unique model name linear_model_name = "boston-update-linear-model" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # We also need to tell SageMaker which container should be used for inference and where it should # retrieve the model artifacts from. 
In our case, the linear-learner container that we used for training # can also be used for inference. linear_primary_container = { "Image": linear_container, "ModelDataUrl": linear.model_data } # And lastly we construct the SageMaker model linear_model_info = session.sagemaker_client.create_model( ModelName = linear_model_name, ExecutionRoleArn = role, PrimaryContainer = linear_primary_container) # As before, we need to give our endpoint configuration a name which should be unique linear_endpoint_config_name = "boston-linear-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration linear_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = linear_endpoint_config_name, ProductionVariants = [{ "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": linear_model_name, "VariantName": "Linear-Model" }]) # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = linear_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) result = response['Body'].read().decode("utf-8") pprint(result) Y_test.values[0] session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) # As before, we need to give our endpoint configuration a name which should be unique combined_endpoint_config_name = "boston-combined-endpoint-config-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we ask SageMaker to construct the endpoint configuration combined_endpoint_config_info = session.sagemaker_client.create_endpoint_config( EndpointConfigName = combined_endpoint_config_name, ProductionVariants = [ { # First we include the linear model "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": linear_model_name, "VariantName": "Linear-Model" }, { # And next we include the xgb model "InstanceType": "ml.m4.xlarge", "InitialVariantWeight": 1, "InitialInstanceCount": 1, "ModelName": xgb_model_name, "VariantName": "XGB-Model" }]) # Again, we need a unique name for our endpoint endpoint_name = "boston-update-endpoint-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # And then we can deploy our endpoint endpoint_info = session.sagemaker_client.create_endpoint( EndpointName = endpoint_name, EndpointConfigName = combined_endpoint_config_name) endpoint_dec = session.wait_for_endpoint(endpoint_name) response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[0]))) pprint(response) for rec in range(10): response = session.sagemaker_runtime_client.invoke_endpoint( EndpointName = endpoint_name, ContentType = 'text/csv', Body = ','.join(map(str, X_test.values[rec]))) pprint(response) result = response['Body'].read().decode("utf-8") print(result) print(Y_test.values[rec]) pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) session.sagemaker_client.update_endpoint(EndpointName=endpoint_name, EndpointConfigName=linear_endpoint_config_name) 
pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) endpoint_dec = session.wait_for_endpoint(endpoint_name) pprint(session.sagemaker_client.describe_endpoint(EndpointName=endpoint_name)) session.sagemaker_client.delete_endpoint(EndpointName = endpoint_name) # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir data_dir
0.448668
0.98943
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns ``` Creation d'une liste des noms de colonnes pour l'importation du dataframe ``` liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] #dataframe data qui sera modifié ensuite data = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' ) data.shape # suppression des doublons data = pd.DataFrame(data.drop_duplicates()) data.shape data[data.isna().any(axis=1)] # 2398 rows × 15 columns avec un NaN #dataframe df qui restera brut, au cas ou pour plus loin df = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python') #, na_values='?' # suppression des doublons df = pd.DataFrame(data.drop_duplicates()) data.head() data.info() data.describe() data.shape sns.heatmap(data.isnull(),yticklabels=False,cbar=True,cmap='plasma'); # suppression de la colonne fnlwgt qui ne sert à rien data.drop('fnlwgt',axis=1, inplace= True) data.columns data['age'].unique() data['workclass'].unique() data['education'].unique() sns.pairplot(data); # transformation des male en 0 et female en 1 data.sex = data.sex.map({'Male': 0, 'Female': 1}) # proportion hommes/femmes vals = df["sex"].value_counts() plt.bar(vals.index,vals); # visualisation des niveaux d'études vals = data["education-num"].value_counts() plt.bar(vals.index,vals); #visualisation des "races" vals = data["race"].value_counts() plt.bar(vals.index,vals); # visualisation des country d'origines. USA en grosse majorité data['native-country'].value_counts().plot(kind='bar') plt.xlabel('Country') plt.ylabel('Amount of people'); # race en fontion de l'age et sex en fonciton de l'age fig, (ax1,ax2) = plt.subplots(1,2,figsize=(15, 9)) sns.boxplot(x='race',y='age',data=df,palette='winter',ax=ax1) sns.boxplot(x='sex',y='age',data=df,palette='plasma',ax=ax2); # autre visu pour la répartition des sexes fig, (ax1,ax2) = plt.subplots(1,2, figsize=(10,4)) df['sex'].value_counts().plot(kind='pie', ax = ax1) plt.xlabel('Sex') plt.ylabel('Amount of people'); df["sex"].value_counts().plot(kind="bar", ax=ax2); # salaire en fonction de l'age et du nombre d'heures travaillées fig, (ax1,ax2) = plt.subplots(1,2,figsize=(15, 9)) sns.boxplot(x='salaire',y='age',data=df,palette='winter',ax=ax1) sns.boxplot(x='salaire',y='hours-per-week',data=df,palette='plasma',ax=ax2); ageData = data['age'].value_counts().to_dict() xAxis = [key for key in ageData] yAxis = [ageData[key] for key in ageData] plt.scatter(xAxis, yAxis) plt.xlabel('Age') plt.ylabel('individus'); # les études des personnes du dataset df['education'].value_counts().plot(kind='bar') plt.xlabel('étures') plt.ylabel('nombre de personnes'); # statut marital df['marital-status'].value_counts().plot(kind='bar') plt.xlabel('Marital Status') plt.ylabel('Amount of people'); # occupation df['occupation'].value_counts().plot(kind='bar') plt.xlabel('Occupation') plt.ylabel('Amount of people'); # workclass df['workclass'].value_counts().plot(kind='bar') plt.xlabel('Work Class') plt.ylabel('Amount of people'); data.tail() # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp1 = pd.get_dummies(data['workclass'],drop_first=True) temp2 = pd.get_dummies(data['education'],drop_first=True) temp3 = pd.get_dummies(data['marital-status'],drop_first=True) temp4 = 
pd.get_dummies(data['occupation'],drop_first=True) temp5 = pd.get_dummies(data['relationship'],drop_first=True) temp6 = pd.get_dummies(data['race'],drop_first=True) temp7 = pd.get_dummies(data['native-country'],drop_first=True) # et op on concat tout ça data = pd.concat([data,temp1,temp2,temp3,temp4,temp5,temp6,temp7],axis=1) # ca drop dans tous les sens data.drop(['workclass','education','marital-status','occupation','relationship','race','native-country'],axis=1,inplace=True) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k data.salaire = data.salaire.map({'<=50K': -1, '>50K': 1}) # et aller, c'est repartit pour du drop # data.drop(['capital-gain','capital-loss'],axis=1,inplace=True) # non, finalement on va pas droper ça trop vite data.head() # on fait notre joli petit X et notre y y = data['salaire'] X = data.drop('salaire',axis=1) # découpage des train test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) print(f'La taille de X_train est : {X_train.shape}' ) print(f'La taille de X_test est : {X_test.shape}') # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Instanciation et entraînement du modèle from sklearn.linear_model import LogisticRegression reglog = LogisticRegression(solver='liblinear') reglog.fit(X_train, y_train) # Prédictions sur le test set y_pred = reglog.predict(X_test) # Accuracy score --> plus de 85 % from sklearn.metrics import accuracy_score print(accuracy_score(y_test,y_pred)) # Classification report from sklearn.metrics import classification_report print(classification_report(y_test,y_pred)) # Matrice de confusion from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred)) # Courbe ROC from sklearn.metrics import roc_curve, auc proba = reglog.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, proba[:, 1], pos_label=1, drop_intermediate=False) fig, ax = plt.subplots(1, 1, figsize=(6,6)) ax.plot([0, 1], [0, 1], 'k--') # aucf = roc_auc_score(y_test == clr.classes_[0], probas[:, 0]) # première méthode aucf = auc(fpr, tpr) # seconde méthode ax.plot(fpr, tpr, label='auc=%1.5f' % aucf) ax.set_title('Courbe ROC') ax.text(0.5, 0.3, "plus mauvais que\nle hasard dans\ncette zone") ax.legend(); # Coefficients coef = pd.DataFrame(np.concatenate([reglog.intercept_.reshape(-1,1), reglog.coef_],axis=1), index = ["Coefficients"], columns = ["constante"]+list(X.columns)).T coef["OR"] = np.exp(coef.Coefficients) coef["1/OR"] = np.exp(-coef.Coefficients) coef.head(50) # coef de la fin coef.tail(50) # on classe les coefs, en valeur absolue pour avoir les coef ayant le plus d'impact en + ou - # vérifier avec les tableaux au dessus s'ils sont en plus ou en moins coef_abs = abs(coef) coef_abs.sort_values(by=['Coefficients'],ascending=False).head(20) # cross_val_score en 5 découpes, on voit la moyenne proche de 85% pour les 5 from sklearn.model_selection import cross_val_score cross_val_score(reglog, X, y, cv=5) from sklearn.metrics import make_scorer, r2_score cross_val_score(reglog, X, y, cv=5, scoring='r2') from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], "penalty":["l1","l2"]} grid = GridSearchCV(LogisticRegression(), param_grid, cv=5) grid.fit(X_train, y_train) print("Best cross-validation score: 
{:.2f}".format(grid.best_score_)) print("Best parameters: ", grid.best_params_) print("Best estimator: ", grid.best_estimator_) ``` ## Knn ``` from sklearn.neighbors import KNeighborsClassifier scores = [] for k in range(1,31): knn = KNeighborsClassifier(n_neighbors=k, algorithm='brute') knn.fit(X_train, y_train) #pred = knn.predict(X_test) scores.append(knn.score(X_test,y_test)) plt.plot(range(1,31),scores); ``` ## Linéar Regression sans la colonne " native pays" ``` #dataframe data_sans_nativ qui sera modifié ensuite liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] data_sans_nativ = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' ) # suppression des doublons data_sans_nativ = pd.DataFrame(data_sans_nativ.drop_duplicates()) # drop drop data_sans_nativ.drop('fnlwgt',axis=1, inplace= True) data_sans_nativ.drop('native-country',axis=1, inplace= True) data_sans_nativ.sex = data_sans_nativ.sex.map({'Male': 0, 'Female': 1}) # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp_1 = pd.get_dummies(data_sans_nativ['workclass'],drop_first=True) temp_2 = pd.get_dummies(data_sans_nativ['education'],drop_first=True) temp_3 = pd.get_dummies(data_sans_nativ['marital-status'],drop_first=True) temp_4 = pd.get_dummies(data_sans_nativ['occupation'],drop_first=True) temp_5 = pd.get_dummies(data_sans_nativ['relationship'],drop_first=True) temp_6 = pd.get_dummies(data_sans_nativ['race'],drop_first=True) # et op on concat tout ça data_sans_nativ = pd.concat([data_sans_nativ,temp_1,temp_2,temp_3,temp_4,temp_5,temp_6],axis=1) # ca drop dans tous les sens data_sans_nativ.drop(['workclass','education','marital-status','occupation','relationship','race'],axis=1,inplace=True) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k data_sans_nativ.salaire = data_sans_nativ.salaire.map({'<=50K': -1, '>50K': 1}) data_sans_nativ.shape y = data_sans_nativ['salaire'] X = data_sans_nativ.drop('salaire',axis=1) # découpage des train test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) print(f'La taille de X_train est : {X_train.shape}' ) print(f'La taille de X_test est : {X_test.shape}') # Instanciation et entraînement du modèle from sklearn.linear_model import LogisticRegression reglog = LogisticRegression(solver='liblinear') reglog.fit(X_train, y_train) # Prédictions sur le test set y_pred = reglog.predict(X_test) # Accuracy score --> plus de 85 % from sklearn.metrics import accuracy_score print(accuracy_score(y_test,y_pred)) # Classification report from sklearn.metrics import classification_report print(classification_report(y_test,y_pred)) # Matrice de confusion from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred)) # Courbe ROC from sklearn.metrics import roc_curve, auc proba = reglog.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, proba[:, 1], pos_label=1, drop_intermediate=False) fig, ax = plt.subplots(1, 1, figsize=(6,6)) ax.plot([0, 1], [0, 1], 'k--') # aucf = roc_auc_score(y_test == clr.classes_[0], probas[:, 0]) # première méthode aucf = 
auc(fpr, tpr) # seconde méthode ax.plot(fpr, tpr, label='auc=%1.5f' % aucf) ax.set_title('Courbe ROC') ax.text(0.5, 0.3, "plus mauvais que\nle hasard dans\ncette zone") ax.legend(); # Coefficients coef = pd.DataFrame(np.concatenate([reglog.intercept_.reshape(-1,1), reglog.coef_],axis=1), index = ["Coefficients"], columns = ["constante"]+list(X.columns)).T coef["OR"] = np.exp(coef.Coefficients) coef["1/OR"] = np.exp(-coef.Coefficients) coef.head(50) # on classe les coefs, en valeur absolue pour avoir les coef ayant le plus d'impact en + ou - # vérifier avec les tableaux au dessus s'ils sont en plus ou en moins coef.sort_values(by=['OR'], ascending=False).head(20) # cross_val_score en 5 découpes, on voit la moyenne proche de 85% pour les 5 from sklearn.model_selection import cross_val_score cross_val_score(reglog, X, y, cv=5) from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], "penalty":["l1","l2"]} grid = GridSearchCV(LogisticRegression(), param_grid, cv=5) grid.fit(X_train, y_train) print("Best cross-validation score: {:.2f}".format(grid.best_score_)) print("Best parameters: ", grid.best_params_) print("Best estimator: ", grid.best_estimator_) ## KNN data_sans_nativ : Meuilleur résultat qu'avec les pays ( environ 0.5 de plus) from sklearn.neighbors import KNeighborsClassifier scores = [] for k in range(1,31): knn = KNeighborsClassifier(n_neighbors=k, algorithm='brute') knn.fit(X_train, y_train) #pred = knn.predict(X_test) scores.append(knn.score(X_test,y_test)) plt.plot(range(1,31),scores); ``` ## TEST sur la base TEST ### D'après le modèle : Regression Log+ KNN sans la colonne " native pays" ``` #dataframe test liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] test = pd.read_csv('adult.test', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' 
, skiprows=1) test.drop('fnlwgt',axis=1, inplace= True) test.drop('native-country',axis=1, inplace= True) # suppression des doublons test = pd.DataFrame(test.drop_duplicates()) test.sex = test.sex.map({'Male': 0, 'Female': 1}) # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp_test_1 = pd.get_dummies(test['workclass'],drop_first=True) temp_test_2 = pd.get_dummies(test['education'],drop_first=True) temp_test_3 = pd.get_dummies(test['marital-status'],drop_first=True) temp_test_4 = pd.get_dummies(test['occupation'],drop_first=True) temp_test_5 = pd.get_dummies(test['relationship'],drop_first=True) temp_test_6 = pd.get_dummies(test['race'],drop_first=True) # et op on concat tout ça test = pd.concat([test,temp_test_1,temp_test_2,temp_test_3,temp_test_4,temp_test_5,temp_test_6],axis=1) # ca drop dans tous les sens test.drop(['workclass','education','marital-status','occupation','relationship','race'],axis=1,inplace=True) yDeTest = test['salaire'] XDeTest = test.drop('salaire',axis=1) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k yDeTest = yDeTest.map({'<=50K.': -1, '>50K.': 1}) # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() XDeTest = sc.fit_transform(XDeTest) # Prédictions sur le test set y_predDeTest_knn = knn.predict(XDeTest) y_predDeTest_reglog = reglog.predict(XDeTest) from sklearn.metrics import accuracy_score print(f'Score sur base test en KNN: {accuracy_score(yDeTest,y_predDeTest_knn)}' ) print(f'Score sur base test en REg Log: {accuracy_score(yDeTest,y_predDeTest_reglog)}' ) from sklearn.metrics import classification_report print(f' KNN {classification_report(yDeTest,y_predDeTest_knn)}') print(f' REG LOG {classification_report(yDeTest,y_predDeTest_reglog)}') XDeTest.shape ```
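One caveat about the evaluation on `adult.test` above: a brand-new `StandardScaler` is fitted directly on the test features, and the one-hot encoded columns of the test set are never explicitly aligned with those of the training data, so the features handed to `knn` and `reglog` may not correspond to the ones they were trained on. A hedged sketch of the more conventional approach follows; the `reindex` call and the refitted `sc_train` scaler are my additions, not part of the original notebook.

```
from sklearn.preprocessing import StandardScaler

# Columns the models were trained on: X is the unscaled training feature frame
# from the data_sans_nativ section.
train_columns = X.columns

# Align the one-hot encoded test features on the training columns; any category
# absent from adult.test is filled with 0.
XDeTest_aligned = test.drop('salaire', axis=1).reindex(columns=train_columns, fill_value=0)

# Fit the scaler on the training-side features and apply it unchanged to the test set.
# (In the notebook X_train was already overwritten by its scaled version, so this sketch
# refits on X rather than reusing that exact scaler.)
sc_train = StandardScaler().fit(X)
XDeTest_scaled = sc_train.transform(XDeTest_aligned)

y_predDeTest_knn = knn.predict(XDeTest_scaled)
y_predDeTest_reglog = reglog.predict(XDeTest_scaled)
```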
github_jupyter
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] #dataframe data qui sera modifié ensuite data = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' ) data.shape # suppression des doublons data = pd.DataFrame(data.drop_duplicates()) data.shape data[data.isna().any(axis=1)] # 2398 rows × 15 columns avec un NaN #dataframe df qui restera brut, au cas ou pour plus loin df = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python') #, na_values='?' # suppression des doublons df = pd.DataFrame(data.drop_duplicates()) data.head() data.info() data.describe() data.shape sns.heatmap(data.isnull(),yticklabels=False,cbar=True,cmap='plasma'); # suppression de la colonne fnlwgt qui ne sert à rien data.drop('fnlwgt',axis=1, inplace= True) data.columns data['age'].unique() data['workclass'].unique() data['education'].unique() sns.pairplot(data); # transformation des male en 0 et female en 1 data.sex = data.sex.map({'Male': 0, 'Female': 1}) # proportion hommes/femmes vals = df["sex"].value_counts() plt.bar(vals.index,vals); # visualisation des niveaux d'études vals = data["education-num"].value_counts() plt.bar(vals.index,vals); #visualisation des "races" vals = data["race"].value_counts() plt.bar(vals.index,vals); # visualisation des country d'origines. USA en grosse majorité data['native-country'].value_counts().plot(kind='bar') plt.xlabel('Country') plt.ylabel('Amount of people'); # race en fontion de l'age et sex en fonciton de l'age fig, (ax1,ax2) = plt.subplots(1,2,figsize=(15, 9)) sns.boxplot(x='race',y='age',data=df,palette='winter',ax=ax1) sns.boxplot(x='sex',y='age',data=df,palette='plasma',ax=ax2); # autre visu pour la répartition des sexes fig, (ax1,ax2) = plt.subplots(1,2, figsize=(10,4)) df['sex'].value_counts().plot(kind='pie', ax = ax1) plt.xlabel('Sex') plt.ylabel('Amount of people'); df["sex"].value_counts().plot(kind="bar", ax=ax2); # salaire en fonction de l'age et du nombre d'heures travaillées fig, (ax1,ax2) = plt.subplots(1,2,figsize=(15, 9)) sns.boxplot(x='salaire',y='age',data=df,palette='winter',ax=ax1) sns.boxplot(x='salaire',y='hours-per-week',data=df,palette='plasma',ax=ax2); ageData = data['age'].value_counts().to_dict() xAxis = [key for key in ageData] yAxis = [ageData[key] for key in ageData] plt.scatter(xAxis, yAxis) plt.xlabel('Age') plt.ylabel('individus'); # les études des personnes du dataset df['education'].value_counts().plot(kind='bar') plt.xlabel('étures') plt.ylabel('nombre de personnes'); # statut marital df['marital-status'].value_counts().plot(kind='bar') plt.xlabel('Marital Status') plt.ylabel('Amount of people'); # occupation df['occupation'].value_counts().plot(kind='bar') plt.xlabel('Occupation') plt.ylabel('Amount of people'); # workclass df['workclass'].value_counts().plot(kind='bar') plt.xlabel('Work Class') plt.ylabel('Amount of people'); data.tail() # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp1 = pd.get_dummies(data['workclass'],drop_first=True) temp2 = pd.get_dummies(data['education'],drop_first=True) temp3 = pd.get_dummies(data['marital-status'],drop_first=True) temp4 = pd.get_dummies(data['occupation'],drop_first=True) temp5 = 
pd.get_dummies(data['relationship'],drop_first=True) temp6 = pd.get_dummies(data['race'],drop_first=True) temp7 = pd.get_dummies(data['native-country'],drop_first=True) # et op on concat tout ça data = pd.concat([data,temp1,temp2,temp3,temp4,temp5,temp6,temp7],axis=1) # ca drop dans tous les sens data.drop(['workclass','education','marital-status','occupation','relationship','race','native-country'],axis=1,inplace=True) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k data.salaire = data.salaire.map({'<=50K': -1, '>50K': 1}) # et aller, c'est repartit pour du drop # data.drop(['capital-gain','capital-loss'],axis=1,inplace=True) # non, finalement on va pas droper ça trop vite data.head() # on fait notre joli petit X et notre y y = data['salaire'] X = data.drop('salaire',axis=1) # découpage des train test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) print(f'La taille de X_train est : {X_train.shape}' ) print(f'La taille de X_test est : {X_test.shape}') # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Instanciation et entraînement du modèle from sklearn.linear_model import LogisticRegression reglog = LogisticRegression(solver='liblinear') reglog.fit(X_train, y_train) # Prédictions sur le test set y_pred = reglog.predict(X_test) # Accuracy score --> plus de 85 % from sklearn.metrics import accuracy_score print(accuracy_score(y_test,y_pred)) # Classification report from sklearn.metrics import classification_report print(classification_report(y_test,y_pred)) # Matrice de confusion from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred)) # Courbe ROC from sklearn.metrics import roc_curve, auc proba = reglog.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, proba[:, 1], pos_label=1, drop_intermediate=False) fig, ax = plt.subplots(1, 1, figsize=(6,6)) ax.plot([0, 1], [0, 1], 'k--') # aucf = roc_auc_score(y_test == clr.classes_[0], probas[:, 0]) # première méthode aucf = auc(fpr, tpr) # seconde méthode ax.plot(fpr, tpr, label='auc=%1.5f' % aucf) ax.set_title('Courbe ROC') ax.text(0.5, 0.3, "plus mauvais que\nle hasard dans\ncette zone") ax.legend(); # Coefficients coef = pd.DataFrame(np.concatenate([reglog.intercept_.reshape(-1,1), reglog.coef_],axis=1), index = ["Coefficients"], columns = ["constante"]+list(X.columns)).T coef["OR"] = np.exp(coef.Coefficients) coef["1/OR"] = np.exp(-coef.Coefficients) coef.head(50) # coef de la fin coef.tail(50) # on classe les coefs, en valeur absolue pour avoir les coef ayant le plus d'impact en + ou - # vérifier avec les tableaux au dessus s'ils sont en plus ou en moins coef_abs = abs(coef) coef_abs.sort_values(by=['Coefficients'],ascending=False).head(20) # cross_val_score en 5 découpes, on voit la moyenne proche de 85% pour les 5 from sklearn.model_selection import cross_val_score cross_val_score(reglog, X, y, cv=5) from sklearn.metrics import make_scorer, r2_score cross_val_score(reglog, X, y, cv=5, scoring='r2') from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], "penalty":["l1","l2"]} grid = GridSearchCV(LogisticRegression(), param_grid, cv=5) grid.fit(X_train, y_train) print("Best cross-validation score: {:.2f}".format(grid.best_score_)) print("Best parameters: ", grid.best_params_) 
print("Best estimator: ", grid.best_estimator_) from sklearn.neighbors import KNeighborsClassifier scores = [] for k in range(1,31): knn = KNeighborsClassifier(n_neighbors=k, algorithm='brute') knn.fit(X_train, y_train) #pred = knn.predict(X_test) scores.append(knn.score(X_test,y_test)) plt.plot(range(1,31),scores); #dataframe data_sans_nativ qui sera modifié ensuite liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] data_sans_nativ = pd.read_csv('adult.data', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' ) # suppression des doublons data_sans_nativ = pd.DataFrame(data_sans_nativ.drop_duplicates()) # drop drop data_sans_nativ.drop('fnlwgt',axis=1, inplace= True) data_sans_nativ.drop('native-country',axis=1, inplace= True) data_sans_nativ.sex = data_sans_nativ.sex.map({'Male': 0, 'Female': 1}) # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp_1 = pd.get_dummies(data_sans_nativ['workclass'],drop_first=True) temp_2 = pd.get_dummies(data_sans_nativ['education'],drop_first=True) temp_3 = pd.get_dummies(data_sans_nativ['marital-status'],drop_first=True) temp_4 = pd.get_dummies(data_sans_nativ['occupation'],drop_first=True) temp_5 = pd.get_dummies(data_sans_nativ['relationship'],drop_first=True) temp_6 = pd.get_dummies(data_sans_nativ['race'],drop_first=True) # et op on concat tout ça data_sans_nativ = pd.concat([data_sans_nativ,temp_1,temp_2,temp_3,temp_4,temp_5,temp_6],axis=1) # ca drop dans tous les sens data_sans_nativ.drop(['workclass','education','marital-status','occupation','relationship','race'],axis=1,inplace=True) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k data_sans_nativ.salaire = data_sans_nativ.salaire.map({'<=50K': -1, '>50K': 1}) data_sans_nativ.shape y = data_sans_nativ['salaire'] X = data_sans_nativ.drop('salaire',axis=1) # découpage des train test from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1) # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) print(f'La taille de X_train est : {X_train.shape}' ) print(f'La taille de X_test est : {X_test.shape}') # Instanciation et entraînement du modèle from sklearn.linear_model import LogisticRegression reglog = LogisticRegression(solver='liblinear') reglog.fit(X_train, y_train) # Prédictions sur le test set y_pred = reglog.predict(X_test) # Accuracy score --> plus de 85 % from sklearn.metrics import accuracy_score print(accuracy_score(y_test,y_pred)) # Classification report from sklearn.metrics import classification_report print(classification_report(y_test,y_pred)) # Matrice de confusion from sklearn.metrics import confusion_matrix print(confusion_matrix(y_test, y_pred)) # Courbe ROC from sklearn.metrics import roc_curve, auc proba = reglog.predict_proba(X_test) fpr, tpr, thresholds = roc_curve(y_test, proba[:, 1], pos_label=1, drop_intermediate=False) fig, ax = plt.subplots(1, 1, figsize=(6,6)) ax.plot([0, 1], [0, 1], 'k--') # aucf = roc_auc_score(y_test == clr.classes_[0], probas[:, 0]) # première méthode aucf = auc(fpr, tpr) # seconde méthode ax.plot(fpr, tpr, label='auc=%1.5f' % aucf) ax.set_title('Courbe ROC') ax.text(0.5, 0.3, "plus mauvais que\nle hasard 
dans\ncette zone") ax.legend(); # Coefficients coef = pd.DataFrame(np.concatenate([reglog.intercept_.reshape(-1,1), reglog.coef_],axis=1), index = ["Coefficients"], columns = ["constante"]+list(X.columns)).T coef["OR"] = np.exp(coef.Coefficients) coef["1/OR"] = np.exp(-coef.Coefficients) coef.head(50) # on classe les coefs, en valeur absolue pour avoir les coef ayant le plus d'impact en + ou - # vérifier avec les tableaux au dessus s'ils sont en plus ou en moins coef.sort_values(by=['OR'], ascending=False).head(20) # cross_val_score en 5 découpes, on voit la moyenne proche de 85% pour les 5 from sklearn.model_selection import cross_val_score cross_val_score(reglog, X, y, cv=5) from sklearn.model_selection import GridSearchCV from sklearn.linear_model import LogisticRegression param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], "penalty":["l1","l2"]} grid = GridSearchCV(LogisticRegression(), param_grid, cv=5) grid.fit(X_train, y_train) print("Best cross-validation score: {:.2f}".format(grid.best_score_)) print("Best parameters: ", grid.best_params_) print("Best estimator: ", grid.best_estimator_) ## KNN data_sans_nativ : Meuilleur résultat qu'avec les pays ( environ 0.5 de plus) from sklearn.neighbors import KNeighborsClassifier scores = [] for k in range(1,31): knn = KNeighborsClassifier(n_neighbors=k, algorithm='brute') knn.fit(X_train, y_train) #pred = knn.predict(X_test) scores.append(knn.score(X_test,y_test)) plt.plot(range(1,31),scores); #dataframe test liste = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status','occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'salaire'] test = pd.read_csv('adult.test', sep=' *, *', names=liste, index_col=False, engine='python', na_values='?' 
, skiprows=1) test.drop('fnlwgt',axis=1, inplace= True) test.drop('native-country',axis=1, inplace= True) # suppression des doublons test = pd.DataFrame(test.drop_duplicates()) test.sex = test.sex.map({'Male': 0, 'Female': 1}) # les dummies en passant par des variables intermédiaires et en supprimant direct une colonne temp_test_1 = pd.get_dummies(test['workclass'],drop_first=True) temp_test_2 = pd.get_dummies(test['education'],drop_first=True) temp_test_3 = pd.get_dummies(test['marital-status'],drop_first=True) temp_test_4 = pd.get_dummies(test['occupation'],drop_first=True) temp_test_5 = pd.get_dummies(test['relationship'],drop_first=True) temp_test_6 = pd.get_dummies(test['race'],drop_first=True) # et op on concat tout ça test = pd.concat([test,temp_test_1,temp_test_2,temp_test_3,temp_test_4,temp_test_5,temp_test_6],axis=1) # ca drop dans tous les sens test.drop(['workclass','education','marital-status','occupation','relationship','race'],axis=1,inplace=True) yDeTest = test['salaire'] XDeTest = test.drop('salaire',axis=1) # transformation des salaires en -1 et 1 car on a juste inférieur et supérieur à 50k yDeTest = yDeTest.map({'<=50K.': -1, '>50K.': 1}) # Standardisation from sklearn.preprocessing import StandardScaler sc = StandardScaler() XDeTest = sc.fit_transform(XDeTest) # Prédictions sur le test set y_predDeTest_knn = knn.predict(XDeTest) y_predDeTest_reglog = reglog.predict(XDeTest) from sklearn.metrics import accuracy_score print(f'Score sur base test en KNN: {accuracy_score(yDeTest,y_predDeTest_knn)}' ) print(f'Score sur base test en REg Log: {accuracy_score(yDeTest,y_predDeTest_reglog)}' ) from sklearn.metrics import classification_report print(f' KNN {classification_report(yDeTest,y_predDeTest_knn)}') print(f' REG LOG {classification_report(yDeTest,y_predDeTest_reglog)}') XDeTest.shape
<a href="https://colab.research.google.com/github/emagc/daa_2021_1/blob/master/Tarea11.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` class NodoArbol: def __init__(self, value, left=None, right=None): self.data=value self.left=left self.right=right class BinarySearchTree: def __init__(self): self._root=None def insert(self, value): if self._root==None: self._root=NodoArbol(value) else: self._insert_nodo_(self._root, value) def _insert_nodo_(self, nodo, value): if nodo.data==value: pass elif value<nodo.data: if nodo.left==None: nodo.left=NodoArbol(value) else: self._insert_nodo_(nodo.left,value) else: if nodo.right==None: nodo.right=NodoArbol(value) else: self._insert_nodo_(nodo.right,value) def buscar(self, value): if self._root==None: return None else: return self.__busca_nodo(self._root,value) def __busca_nodo(self, nodo, value): if nodo==None: return None elif nodo.data==value: return nodo elif value<nodo.data: return self.__busca_nodo(nodo.left,value) else: return self.__busca_nodo(nodo.right,value) def transversal(self, format="inorden"): if format=="inorden": self.__recorrido_in(self._root) elif format=="preorden": self.__recorrido_pre(self._root) elif format=="posorden": self.__recorrido_pos(self._root) else: print("Formato de recorrido no valido") def __recorrido_pre(self, nodo): if nodo!=None: print(nodo.data,end=",") self.__recorrido_pre(nodo.left) self.__recorrido_pre(nodo.right) def __recorrido_in(self, nodo): if nodo!=None: self.__recorrido_in(nodo.left) print(nodo.data,end=",") self.__recorrido_in(nodo.right) def __recorrido_pos(self, nodo): if nodo!=None: self.__recorrido_pos(nodo.left) self.__recorrido_pos(nodo.right) print(nodo.data,end=",") def eliminar(self, value): self.__eliminar_nodo(self._root, self._root, self._root, None, value) def __eliminar_nodo(self, root, nodo, anterior, actual, value): if nodo==None: return print("No existe ese nodo") if nodo.data==value: if nodo.left==None and nodo.right==None: if actual=="izq": anterior.left=None elif actual=="der": anterior.right=None print("solo se borro el nodo") elif nodo.left==None and nodo.right!=None: if actual=="izq": anterior.left=nodo.right else: anterior.right=nodo.right print("se paso el unico nodo derecho hacia arriba") elif nodo.left!=None and nodo.right==None: if actual=="izq": anterior.left=nodo.left else: anterior.right=nodo.left print("se paso el unico nodo izquierdo hacia arriba") elif nodo.left!=None and nodo.right!=None: print("se hizo algo complejo") tmp,anterior2 =self.nodoMasBajo(nodo.right, nodo) if nodo.data==anterior2.data: anterior2.right=tmp.right elif nodo.data!=anterior2.data: anterior2.left=tmp.right if actual=="izq": anterior.left=tmp else: anterior.right=tmp tmp.left=nodo.left tmp.right=nodo.right elif value<nodo.data: return self.__eliminar_nodo(root, nodo.left, nodo, "izq", value) else: return self.__eliminar_nodo(root, nodo.right, nodo, "der", value) def nodoMasBajo(self, nodo, anterior): if nodo.left==None: return nodo, anterior elif nodo.left!=None: return self.nodoMasBajo(nodo.left, nodo) arbol=BinarySearchTree() arbol.insert(50) arbol.insert(40) arbol.insert(80) arbol.insert(20) arbol.insert(45) arbol.insert(60) arbol.insert(90) arbol.insert(85) arbol.insert(100) arbol.insert(95) print(arbol.transversal()) print("____") arbol.eliminar(80) print("______") print(arbol._root.data) print(arbol._root.left.data) print(arbol._root.right.data) print(arbol._root.left.left.data) print(arbol._root.right.left.data) 
print(arbol.transversal()) print(arbol._root.right.right.right.left.data) ```
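The search helper can be checked in the same way. The snippet below is a small illustrative usage (not part of the original assignment) on the `arbol` built above: `buscar` returns the matching node when the value exists and `None` otherwise.

```
# Quick check of buscar on the tree built above
nodo = arbol.buscar(45)
print(nodo.data if nodo is not None else "not found")  # expected: 45
print(arbol.buscar(999))                               # expected: None
```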
# Continuous Control Project Report
---
### 1. Examination of State and Action Spaces

In this environment, a double-jointed arm can move to target locations. A reward of +0.1 is provided for each step that the agent's hand is in the goal location. Thus, the goal of your agent is to maintain its position at the target location for as many time steps as possible.

The observation space consists of 33 variables corresponding to position, rotation, velocity, and angular velocities of the arm. Each action is a vector with four numbers, corresponding to torque applicable to two joints. Every entry in the action vector should be a number between -1 and 1.

### 2. Algorithm

The algorithm used is DDPG. DDPG uses two separate networks that work collaboratively: an actor and a critic. The actor network approximates the policy, while the critic estimates the value function. Training consists of two steps: acting and learning. In the acting step, the agent feeds the state vector into the actor network and receives the actions to take. In the learning step, the critic network evaluates the correctness of the action taken and provides feedback so that the actor network can adjust its weights accordingly.

```
from model import Actor, Critic
from agent import Agent

import numpy as np   # needed for np.mean below
import pandas as pd
import matplotlib.pyplot as plt

import random
import copy
from collections import namedtuple, deque

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

agent = Agent(state_size=state_size, action_size=action_size, random_seed=10)

def ddpg(n_episodes=2000, max_t=10000):
    scores = []
    scores_deque = deque(maxlen=100)
    for i_episode in range(1, n_episodes+1):
        env_info = env.reset(train_mode=True)[brain_name]
        state = env_info.vector_observations[0]
        score = 0
        for t in range(max_t):
            action = agent.act(state)
            env_info = env.step(action)[brain_name]
            next_state = env_info.vector_observations[0]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            agent.step(state, action, reward, next_state, done)
            score += reward
            state = next_state
            if done:
                break
        scores_deque.append(score)
        scores.append(score)
        print('\rEpisode {} score: {:.2f}\tAverage Score: {:.2f}'.format(i_episode, score, np.mean(scores_deque)))
        print('\rAfter Episode {} average score is: {:.2f}'.format(i_episode, np.mean(scores_deque)))
        torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
        torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
        if np.mean(scores_deque) >= 30.0:
            print('\nEnvironment solved in {:d} episodes and average score is: {:.2f}'.format(i_episode, np.mean(scores_deque)))
            torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
            torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
            break
    return scores
```

### 3. Networks

#### Actor Network

- input size = 33, output size = 4
- 2 hidden layers and one output layer
- each hidden layer has 256 hidden units and is followed by a ReLU activation layer
- a batch normalization layer after the first layer
- the output layer is followed by a tanh activation layer

#### Critic Network

- input size = 4, output size = 1
- 2 hidden layers and one output layer
- each hidden layer has 256 hidden units and is followed by a ReLU activation layer
- a batch normalization layer after the first layer
- the output layer is followed by a linear activation unit

#### Hyperparameters

- BUFFER_SIZE = int(1e5)
- BATCH_SIZE = 128
- GAMMA = 0.99
- TAU = 0.001
- LR_ACTOR = 0.0001
- LR_CRITIC = 0.0001
- WEIGHT_DECAY = 0

```
scores = ddpg(n_episodes=5000)
```

### 4. Plot of Reward

```
fig, ax = plt.subplots(1, 1, figsize=[10, 5])
plt.rcParams.update({'font.size': 14})

scores_avg = pd.Series(scores).rolling(100).mean()

ax.plot(scores, "-", c="black", alpha=0.25)
ax.plot(scores_avg, "-", c="red", linewidth=2)
ax.set_xlabel("Episode")
ax.set_ylabel("Score")
ax.axhline(30, c="blue", linewidth=2)
ax.legend(["Score of Each Episode", "Moving Average of last 100 Episode", "Criteria"])

fig.tight_layout()

env.close()
```

### 5. Future Work

Other algorithms, such as PPO, A3C, and D4PG, would be reasonable to try in order to see whether better performance can be achieved. A3C could be tried first in order to do parallel training. A3C waits to experience the environment a little longer before calculating the expected return of the original state, which gives less bias while keeping variance under control and leads to faster convergence with less experience. A3C also replaces the replay buffer with parallel training using parallel agents.
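The `Actor` and `Critic` classes themselves live in `model.py`, which is not reproduced in this report. Purely as an illustration, the sketch below shows one way an actor matching the architecture described in section 3 (33 to 256 to 256 to 4, batch normalization after the first layer, tanh on the output) could be written in PyTorch; it is an assumption about that file's contents, not the actual implementation.

```
import torch
import torch.nn as nn
import torch.nn.functional as F

class ActorSketch(nn.Module):
    """Illustrative actor: state (33) -> 256 -> 256 -> action (4)."""
    def __init__(self, state_size=33, action_size=4, fc_units=256):
        super().__init__()
        self.fc1 = nn.Linear(state_size, fc_units)
        self.bn1 = nn.BatchNorm1d(fc_units)   # batch norm after the first layer
        self.fc2 = nn.Linear(fc_units, fc_units)
        self.fc3 = nn.Linear(fc_units, action_size)

    def forward(self, state):
        x = F.relu(self.bn1(self.fc1(state)))
        x = F.relu(self.fc2(x))
        return torch.tanh(self.fc3(x))        # keeps each torque in [-1, 1]
```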
# CHALLENGE 1

Use NLTK to build a pipeline that performs the following tasks, in this order: Tokenization, Sentence Splitting, Lemmatization, Stemming and POS tagging.

Then generate the following statistics and bar charts for the English text task_1.txt:

- How many words are there in the whole text?
- How many different stems are there?
- What is the number of sentences, and what is the average number of tokens per sentence?
- Generate a bar chart of the set of POS tags of all the words in the text. Sort the results and answer: which part-of-speech classes account for more than 70 or 80% of the total?

```
import nltk
import pandas as pd
import matplotlib.pyplot as plt

from string import punctuation
from collections import Counter

nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')

lemmatizer = nltk.WordNetLemmatizer()
stemmer = nltk.stem.PorterStemmer()

def read_file(filename: str) -> str:
    with open(filename, 'r', encoding='utf-8') as file:
        return file.read()

def lematize_tokens(tokens: list) -> list:
    return [lemmatizer.lemmatize(token) for token in tokens]

def stem_tokens(tokens: list) -> list:
    return [stemmer.stem(token) for token in tokens]

def get_onlywords_from_tokens(tokens: list) -> list:
    # token[0] is used because characters such as ´´ are not caught by the check;
    # in that case only ` is left, which is in punctuation
    return [token for token in tokens if token[0] not in punctuation and not token.isdigit()]

def get_qtd_tokens_for_sentence(sentence: str) -> int:
    return len([word for word in nltk.word_tokenize(sentence)])

def get_average_tokens_for_sentence(sentence_list: list) -> int:
    return sum(sentence_list) / len(sentence_list)
```

## Tokenization, Sentence Splitting, Lemmatization, Stemming and POS tagging

```
text_1 = read_file('task_1.txt')

text_1_word_tokens = nltk.word_tokenize(text_1)
text_1_sent_tokens = nltk.sent_tokenize(text_1)
text_1_lemmas = lematize_tokens(text_1_word_tokens)
text_1_stems = stem_tokens(text_1_word_tokens)
text_1_taggs = nltk.pos_tag(text_1_word_tokens)
```

## How many words are there in the whole text? (also excluding numbers)

```
text_1_onlywords = set(get_onlywords_from_tokens(text_1_word_tokens))
print(f"text_1 contains {len(text_1_onlywords)} words.")
```

## How many different stems are there? (counting only words)

```
text_1_onlyword_stemms = stem_tokens(text_1_onlywords)
text1_vocab = set(text_1_onlyword_stemms)
print(f'text_1 contains {len(text1_vocab)} different stems.')
```

## What is the number of sentences and the average number of tokens per sentence?

```
print(f"There are {len(text_1_sent_tokens)} sentences in text 1\n \n")

tokens_for_sentence = { sentence: get_qtd_tokens_for_sentence(sentence) for sentence in text_1_sent_tokens}

for sentence, qtd in tokens_for_sentence.items():
    print(f'The sentence "{sentence}" contains {qtd} tokens.\n')

average = get_average_tokens_for_sentence(list(tokens_for_sentence.values()))
print(f'The average number of *tokens* per sentence is {average:.2f}.')
```

## Generate a bar chart of the set of POS tags of all the words in the text. Sort the results and answer: which part-of-speech classes account for more than 70 or 80% of the total?

```
taggs = nltk.pos_tag(text_1_onlywords)
counts = Counter( tag for (word, tag) in taggs)
print(counts)

fig = plt.figure()
ax = fig.add_axes([1,1,2,2])
ax.bar(counts.keys(), counts.values(), width=0.5, color="red")
ax.legend(labels=['Count'])
plt.show()

print(f"The 3 most common POS tags are: {counts.most_common(3)}.")
```
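To answer the last question more directly, a short illustrative follow-up (not part of the original solution) can report the cumulative share of each POS tag, which makes it easy to see which classes account for roughly 70 or 80% of the total:

```
# Cumulative share of POS tags, from most to least frequent
total = sum(counts.values())
cumulative = 0
for tag, qty in counts.most_common():
    cumulative += qty
    print(f"{tag}: {qty} ({cumulative / total:.1%} cumulative)")
```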
<a href="https://colab.research.google.com/github/nadirbelarouci/NN-overfitting-isgood/blob/master/NN_overfitting_is_good.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Neural Networks Overfitting is GOOD Overffiting may caused a lot of problems to many machine learning developers, and mostly all articles discuess how to avoid it, however in this notebook we will see what if overfitting is actually good in some senarios. The idea is to use overfetting to memorize bytes of data into a neural network, the ordinary way that we do this is to map each index to one byte. Our apprach here is to map multipe indices to the same byte. what do we get from this ? - a lossless compression algorithm - a read only data: any changes to the weights of the neural network will impact the data, thus assuring its integrity - an abstraction to the type of data The following is an example demonstrating the goal of the idea. We will be using a neural network of 3 layers: - the intput layer is an index mapping a byte in a 32 bit format - a hidden layer of size 64 - the output layer which represents the byte itself. ``` import tensorflow as tf import numpy as np ``` ### Preparing the data - The data is represented as long enough string. - The string will be represented by `x_train` and `y_train` vectors - Each training data features are represented as a 32 bit format of the index - Each training example label is the rank of the letter in the alphabet: A is 0, B is 1 etc and SPACE is 27. - In a typical senario, the size of the possible labels is 255. ``` #CONSTANTS INPUT_LAYER_SIZE = 32 LABELS_SIZE = 27 def prepare_data(): random_text = "BASED ON YOUR INPUT GET A RANDOM ALPHA NUHE RANDOM STRING GENERATOR CREATES A SERIES OF AND LETTERS ALRBRFNKPZNJEBPCZ FHGDQPZLHE HELLO THERE HOW ARE YOU DOING TODAY AND BLA BLA BLA" length = len(random_text) x_train = np.zeros((INPUT_LAYER_SIZE,length)) y_train = np.zeros(length) for i in range(length): x = [int(x) for x in bin(i)[2:]] # convert the index to a list of bits x = np.pad(x,(INPUT_LAYER_SIZE-len(x),0),'constant') # add 0 to the lists, size = INPUT_LAYER_SIZE x_train[:,i] = x y_train[i] = LABELS_SIZE-1 if random_text[i]==' ' else ord(random_text[i]) - 65 # map each char to its rank in the alphabet except the space x_train = x_train.T y_train = y_train.T return x_train, y_train x_train, y_train = prepare_data() print(x_train.shape,y_train.shape) # Creating the neural network as described above model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation=tf.nn.relu), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(LABELS_SIZE, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Train neural network until we overfitte the model score = 0.0 while score < 1.0: history = model.fit(x_train, y_train, epochs=5,verbose=False) l = len(history.history['acc'])-1 score = history.history['acc'][l]; score = model.evaluate(x_train, y_train) print(score) # get the predictions predictions = np.argmax(model.predict(x_train),axis=1) # convert them to the appropriate string format to_char = lambda x: " " if x == LABELS_SIZE-1 else chr(x+65) text = "".join(list(map(to_char, predictions))) print (text) ``` Eventough the proof of concept work, the neural network can memorize the data, this example needs optimisation and don't answer to the real problem of compressing data since the actual size of the neural 
network weights is way bigger than the actual size of the string which is (128 bytes). Thank you for reading. ``` Author: Nadir Belarouci Twitter: @nadirbelarouci Github: nadirbelarouci ``` ``` ```
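As a concrete follow-up to the size comparison in the conclusion (an illustrative addition, not part of the original notebook), the storage cost of the trained network can be checked directly against the length of the memorized string:

```
# Approximate storage of the trained network versus the string it memorizes
n_params = model.count_params()
print("parameters:", n_params)
print("approx. weight size in bytes (float32):", n_params * 4)
print("string length in bytes:", len(y_train))
```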
![license_header_logo](../../../images/license_header_logo.png) > **Copyright (c) 2021 CertifAI Sdn. Bhd.**<br> <br> This program is part of OSRFramework. You can redistribute it and/or modify <br>it under the terms of the GNU Affero General Public License as published by <br>the Free Software Foundation, either version 3 of the License, or <br>(at your option) any later version. <br> <br>This program is distributed in the hope that it will be useful <br>but WITHOUT ANY WARRANTY; without even the implied warranty of <br>MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the <br>GNU Affero General Public License for more details. <br> <br>You should have received a copy of the GNU Affero General Public License <br>along with this program. If not, see <http://www.gnu.org/licenses/>. <br> # Introduction This handson is to demonstrate how to implement the tools, transformers and models from the HuggingFace library to create our own Machine Translation Model. Link to the HuggingFace Library: https://huggingface.co/models?sort=downloads You may check out slides Day 12 - Pretrained Model for NLP & Generalized Language Model, to get a detailed explanation and walkthrough on this handson. The walkthrough includes: 1. Explaination of code 2. How to find the model we want from HuggingFace and implement them into the code # What we will accomplish? 1. Use transformers in HuggingFace 2. Import the correct model from HuggingFace 3. Create your own Machine Translation model # Instructions Read the code and execute them according to the instructions provided. If you are having trouble understanding the code, you may take a look at slides, Day 12 - Pretrained Model for NLP & Generalized Language Model, Machine Translation Handson to get a better understanding. # Part 1: Code and its explanation First, we will install the required libraries. torch refers to PyTorch library and transformers refers to the HuggingFace transformers library. We need to install PyTorch in order to utilize the HuggingFace models and transformers. If you already have installed, you can skip this step. AutoModelForSeq2SeqLM is where machine translation models fall under in the HuggingFace library. AutoTokenizer is where we can define tokenizers from the HuggingFace library. Pipeline is a method where we can automate the workflow to produce machine learning model. 
``` from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline ``` model is where we will be defining the machine translation model we are importing a pretrained model (indicated by from_pretraied) that is the Helsinki model tokenizer is where we define tokenizer, also this is a tokenizer from pretrained Helsinki model translation is where we call the pipeline method to automate the machine translation workflow here we defined what process it is going to in the parameters first parameter, “translation_mul_to_en” means translation of multi language to english second parameter, model=model is just us inserting the model we already initialized above third parameter, tokenizer=tokenizer is also just us inserting the tokenizer already initialized above ``` model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-mul-en") tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-mul-en") translation = pipeline("translation", model=model, tokenizer=tokenizer) ``` text is obviously the text that we want to translate translated_text is where we define translated text (Malay to English translated text) the translation method will translate the text to English this method will return a dict, so we want to print only the content of the first element[0] in the dict tagged with ‘translation_text’ ``` text = "Nama saya Micheal, siapakah nama awak?" translated_text = translation(text)[0]['translation_text'] print(translated_text) ``` # Part 2: Choosing the proper model from HuggingFace In this task, you are required to go to the hugging face website to look for the model that can translate english to chinese. HuggingFace website: https://huggingface.co/models?sort=downloads If you are having trouble with this task, take a look at slides Day 12 - Pretrained Model for NLP & Generalized Language Model, to get a detailed explanation and walkthrough on this handson. You will have to find the appropriate model, copy the name of the model provided and paste it into the model and tokenizer parameters. ``` from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline # model = AutoModelForSeq2SeqLM.from_pretrained("COPY AND PASTE THE MODEL NAME HERE") # tokenizer = AutoTokenizer.from_pretrained("COPY AND PASTE THE MODEL NAME HERE") # translation = pipeline("translation", model=model, tokenizer=tokenizer) # text = "Hi, how are you?" # translated_text = translation(text)[0]['translation_text'] # print(translated_text) ``` # Summary Now you know how to create your own machine translation using HuggingFace Library. # Contributors Author Pahvindran Raj
# Decentralization Planning ## Objective and Prerequisites Ready for a mathematical optimization modeling challenge? Put your skills to the test with this example, where you’ll learn how to model and solve a decentralization planning problem. You’ll have to figure out – given a set of departments of a company, and potential cities where these departments can be located – the “best” location for each department in order to maximize gross margins. This model is example 10 from the fifth edition of Model Building in Mathematical Programming by H. Paul Williams on pages 265 and 317-319. This modeling example is at the advanced level, where we assume that you know Python and the Gurobi Python API and that you have advanced knowledge of building mathematical optimization models. Typically, the objective function and/or constraints of these examples are complex or require advanced features of the Gurobi Python API. **Download the Repository** <br /> You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). **Gurobi License** <br /> In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_DECENTRALIZATION_COM_EVAL_GitHub&utm_term=Decentralization_Planning&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_DECENTRALIZATION_ACADEMIC_EVAL_GitHub&utm_term=Decentralization_Planning&utm_content=C_JPM) as an *academic user*. ## Problem Description A large company wants to move some of its departments out of London. Doing so will result in reduced costs in some areas (such as cheaper housing, government incentives, easier recruitment, etc.), and increased costs in other areas (such as communication between departments). The cost implications for all possible locations of each department have been calculated. The goal is to determine where to locate each department in order to maximize the total difference between the reduced costs from relocating and the increased communication costs between departments. The company comprises five departments (A, B, C, D and E). The possible cities for relocation are Bristol and Brighton, or a department may be kept in London. None of these cities (including London) may be the location for more than three of the departments. ## Model Formulation ### Sets and Indices $d,d2 \in \text{Departments}=\{A,B,C,D,E\}$ $c,c2 \in \text{Cities}=\{\text{Bristol}, \text{Brighton}, \text{London}\}$ ### Parameters $\text{benefit}_{d,c} \in \mathbb{R}^+$: Benefit -in thousands of dollars per year, derived from relocating department $d$ to city $c$. $\text{communicationCost}_{d,c,d2,c2} \in \mathbb{R}^+$: Communication cost -in thousands of dollars per year, derived from relocating department $d$ to city $c$ and relocating department $d2$ to city $c2$. We define the set $dcd2c2 = \{(d,c,d2,c2) \in \text{Departments} \times \text{Cities} \times \text{Departments} \times \text{Cities}: \text{communicationCost}_{d,c,d2,c2} > 0 \}$ ### Decision Variables $\text{locate}_{d,c} \in \{0,1 \}$: This binary variable is equal 1, if department $d$ is located at city $c$, and 0 otherwise. 
$y_{d,c,d2,c2} = \text{locate}_{d,c}*\text{locate}_{d2,c2} \in \{0,1 \}$: This auxiliary binary variable is equal 1, if department $d$ is located at city $c$ and department $d2$ is located at city $c2$, and 0 otherwise. ### Constraints **Department location**: Each department must be located in only one city. \begin{equation} \sum_{c \in \text{Cities}} \text{locate}_{d,c} = 1 \quad \forall d \in \text{Departments} \end{equation} **Departments limit**: No city may be the location for more than three departments. \begin{equation} \sum_{d \in \text{Departments}} \text{locate}_{d,c} \leq 3 \quad \forall c \in \text{Cities} \end{equation} **Logical Constraints**: - If $y_{d,c,d2,c2} = 1$ then $\text{locate}_{d,c} = 1$ and $\text{locate}_{d2,c2} = 1$. \begin{equation} y_{d,c,d2,c2} \leq \text{locate}_{d,c} \quad \forall (d,c,d2,c2) \in dcd2c2 \end{equation} \begin{equation} y_{d,c,d2,c2} \leq \text{locate}_{d2,c2} \quad \forall (d,c,d2,c2) \in dcd2c2 \end{equation} - If $\text{locate}_{d,c} = 1$ and $\text{locate}_{d2,c2} = 1 $ then $y_{d,c,d2,c2} = 1$. \begin{equation} \text{locate}_{d,c} + \text{locate}_{d2,c2} - y_{d,c,d2,c2} \leq 1 \quad \forall (d,c,d2,c2) \in dcd2c2 \end{equation} ### Objective Function **Gross margin**: Maximize the gross margin of relocation. \begin{equation} \text{Maximize} \quad Z = \sum_{d \in \text{Departments}} \sum_{c \in \text{Cities}} \text{benefit}_{d,c}*\text{locate}_{d,c} - \sum_{d,c,d2,c2 \in dcd2c2} \text{communicationCost}_{d,c,d2,c2}*y_{d,c,d2,c2} \end{equation} This linear integer programming formulation of the decentralization problem is in fact a linearization of a quadratic assignment formulation of this problem. With Gurobi 9.0, you can directly solve the quadratic assignment formulation of the decentralization problem without the auxiliary variables and the logical constraints. ### Objective Function **Gross margin**: Maximize the gross margin of relocation. \begin{equation} \text{Maximize} \quad Z = \sum_{d \in \text{Departments}} \sum_{c \in \text{Cities}} \text{benefit}_{d,c}*\text{locate}_{d,c} - \sum_{d,c,d2,c2 \in dcd2c2} \text{communicationCost}_{d,c,d2,c2}*\text{locate}_{d,c}*\text{locate}_{d2,c2} \end{equation} ### Constraints **Department location**: Each department must be located in only one city. \begin{equation} \sum_{c \in \text{Cities}} \text{locate}_{d,c} = 1 \quad \forall d \in \text{Departments} \end{equation} **Departments limit**: No city may be the location for more than three departments. \begin{equation} \sum_{d \in \text{Departments}} \text{locate}_{d,c} \leq 3 \quad \forall c \in \text{Cities} \end{equation} ## Python Implementation We import the Gurobi Python Module and other Python libraries. ``` import pandas as pd import gurobipy as gp from gurobipy import GRB # tested with Python 3.7.0 & Gurobi 9.0 ``` ## Input data We define all the input data for the model. ``` # Lists of deparments and cities Deparments = ['A','B','C','D','E'] Cities = ['Bristol', 'Brighton', 'London'] # Create a dictionary to capture benefits -in thousands of dollars from relocation. d2c, benefit = gp.multidict({ ('A', 'Bristol'): 10, ('A', 'Brighton'): 10, ('A', 'London'): 0, ('B', 'Bristol'): 15, ('B', 'Brighton'): 20, ('B', 'London'): 0, ('C', 'Bristol'): 10, ('C', 'Brighton'): 15, ('C', 'London'): 0, ('D', 'Bristol'): 20, ('D', 'Brighton'): 15, ('D', 'London'): 0, ('E', 'Bristol'): 5, ('E', 'Brighton'): 15, ('E', 'London'): 0 }) # Create a dictionary to capture the communication costs -in thousands of dollars from relocation. 
dcd2c2, communicationCost = gp.multidict({ ('A','London','C','Bristol'): 13, ('A','London','C','Brighton'): 9, ('A','London','C','London'): 10, ('A','London','D','Bristol'): 19.5, ('A','London','D','Brighton'): 13.5, ('A','London','D','London'): 15, ('B','London','C','Bristol'): 18.2, ('B','London','C','Brighton'): 12.6, ('B','London','C','London'): 14, ('B','London','D','Bristol'): 15.6, ('B','London','D','Brighton'): 10.8, ('B','London','D','London'): 12, ('C','London','E','Bristol'): 26, ('C','London','E','Brighton'): 18, ('C','London','E','London'): 20, ('D','London','E','Bristol'): 9.1, ('D','London','E','Brighton'): 6.3, ('D','London','E','London'): 7, ('A','Bristol','C','Bristol'): 5, ('A','Bristol','C','Brighton'): 14, ('A','Bristol','C','London'): 13, ('A','Bristol','D','Bristol'): 7.5, ('A','Bristol','D','Brighton'): 21, ('A','Bristol','D','London'): 19.5, ('B','Bristol','C','Bristol'): 7, ('B','Bristol','C','Brighton'): 19.6, ('B','Bristol','C','London'): 18.2, ('B','Bristol','D','Bristol'): 6, ('B','Bristol','D','Brighton'): 16.8, ('B','Bristol','D','London'): 15.6, ('C','Bristol','E','Bristol'): 10, ('C','Bristol','E','Brighton'): 28, ('C','Bristol','E','London'): 26, ('D','Bristol','E','Bristol'): 3.5, ('D','Bristol','E','Brighton'): 9.8, ('D','Bristol','E','London'): 9.1, ('A','Brighton','C','Bristol'): 14, ('A','Brighton','C','Brighton'): 5, ('A','Brighton','C','London'): 9, ('A','Brighton','D','Bristol'): 21, ('A','Brighton','D','Brighton'): 7.5, ('A','Brighton','D','London'): 13.5, ('B','Brighton','C','Bristol'): 19.6, ('B','Brighton','C','Brighton'): 7, ('B','Brighton','C','London'): 12.6, ('B','Brighton','D','Bristol'): 16.8, ('B','Brighton','D','Brighton'): 6, ('B','Brighton','D','London'): 10.8, ('C','Brighton','E','Bristol'): 28, ('C','Brighton','E','Brighton'): 10, ('C','Brighton','E','London'): 18, ('D','Brighton','E','Bristol'): 9.8, ('D','Brighton','E','Brighton'): 3.5, ('D','Brighton','E','London'): 6.3 }) ``` ## Model Deployment We create a model and the variables. These binary decision variables define the city at which each department will be located. Solving quadratic assignment problems with Gurobi is as easy as configuring the global parameter `nonConvex`, and setting this parameter to the value of 2. ``` model = gp.Model('decentralization') # Set global parameters model.params.nonConvex = 2 # locate deparment d at city c locate = model.addVars(d2c, vtype=GRB.BINARY, name="locate") ``` Each department must be located in exactly one city. ``` # Department location constraint department_location = model.addConstrs((gp.quicksum(locate[d,c] for c in Cities) == 1 for d in Deparments), name='department_location') ``` No city may be the location for more than three departments. ``` # Limit on number of departments departments_limit = model.addConstrs((gp.quicksum(locate[d,c] for d in Deparments) <= 3 for c in Cities), name='departments_limit') ``` We now set the optimization objective, which is to maximize gross margins. ``` model.setObjective((gp.quicksum(benefit[d,c]*locate[d,c] for d,c in d2c) - gp.quicksum(communicationCost[d,c,d2,c2]*locate[d,c]*locate[d2,c2] for d,c,d2,c2 in dcd2c2) ), GRB.MAXIMIZE) # Verify model formulation model.write('decentralizationQA.lp') # Run optimization engine model.optimize() ``` ## Analysis The optimal relocation plan and associated financial report follows. 
``` relocation_plan = pd.DataFrame(columns=["Department", "City"]) count = 0 for c in Cities: for d in Deparments: if(locate[d,c].x > 0.5): count += 1 relocation_plan = relocation_plan.append({"Department": d, "City": c }, ignore_index=True ) relocation_plan.index=['']*count relocation_plan print("\n\n_________________________________________________________________________________") print(f"Financial report") print("_________________________________________________________________________________") total_benefit = 0 for c in Cities: for d in Deparments: if(locate[d,c].x > 0.5): total_benefit += 1000*benefit[d,c] dollars_benefit = '${:,.2f}'.format(total_benefit) print(f"The yearly total benefit is {dollars_benefit} dollars") total_communication_cost = 0 for d,c,d2,c2 in dcd2c2: if(locate[d,c].x*locate[d2,c2].x > 0.5): total_communication_cost += 1000*communicationCost[d,c,d2,c2] dollars_communication_cost = '${:,.2f}'.format(total_communication_cost) print(f"The yearly total communication cost is {dollars_communication_cost} dollars") total_gross_margin = total_benefit - total_communication_cost dollars_gross_margin = '${:,.2f}'.format(total_gross_margin) print(f"The yearly total gross margin is {dollars_gross_margin} dollars") ``` ## References H. Paul Williams, Model Building in Mathematical Programming, fifth edition. Copyright © 2020 Gurobi Optimization, LLC
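For completeness, the sketch below shows how the linearized formulation described in the first Model Formulation section (auxiliary $y$ variables plus the logical linking constraints) could be built with the same data dictionaries defined above. It is an illustrative alternative to the quadratic model, not part of the original example.

```
# Linearized MIP: binary y variables replace the bilinear locate*locate terms
lin = gp.Model('decentralization_linearized')
locate_l = lin.addVars(d2c, vtype=GRB.BINARY, name="locate")
y = lin.addVars(dcd2c2, vtype=GRB.BINARY, name="y")

# Each department in exactly one city; at most three departments per city
lin.addConstrs((locate_l.sum(d, '*') == 1 for d in Deparments), name='department_location')
lin.addConstrs((locate_l.sum('*', c) <= 3 for c in Cities), name='departments_limit')

# Logical linking: y = 1 exactly when both departments take those locations
lin.addConstrs((y[d, c, d2, c2] <= locate_l[d, c] for d, c, d2, c2 in dcd2c2), name='link1')
lin.addConstrs((y[d, c, d2, c2] <= locate_l[d2, c2] for d, c, d2, c2 in dcd2c2), name='link2')
lin.addConstrs((locate_l[d, c] + locate_l[d2, c2] - y[d, c, d2, c2] <= 1
                for d, c, d2, c2 in dcd2c2), name='link3')

# Maximize relocation benefits minus (now linear) communication costs
lin.setObjective(gp.quicksum(benefit[d, c]*locate_l[d, c] for d, c in d2c)
                 - gp.quicksum(communicationCost[d, c, d2, c2]*y[d, c, d2, c2]
                               for d, c, d2, c2 in dcd2c2), GRB.MAXIMIZE)
lin.optimize()
```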
In this notebook, we're going to look at how to implement linear regression. We're going to look at the 1-dimensional case, where we use a single feature to estimate the value of a related variable. Once you're comfortable with the single-variable case, try our notebook on linear regression with multiple features! # Import Python modules ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split ``` # Generate Data for Univariate Regression ``` n = 100 #Number of observations in the training set #Assign True parameters to be estimated trueIntercept = 10 trueGradient = -4 x = np.random.uniform(0, 100, n) y = trueIntercept + trueGradient*x + np.random.normal(loc=0, scale=40, size = n) #This is the true relationship that we're trying to model data = pd.DataFrame({'X':x, 'Y':y}) #Put the two arrays into a dataframe to make it easy to work with data.head(10) #Inspect the first ten rows of our dataset ``` # Quickly plot the data so that we know what it looks like ``` plt.scatter(data['X'], data['Y']) plt.show() ``` There appears to be a fairly linear relationship between height and weight in this dataset. Linear regression therefore appropriate without doing feature engineering # The Algebra To fit a linear regression model to data $(x_1, y_1), (x_2, y_2),..., (x_n, y_n)$ such that $y_i = \alpha + \beta x_i + \epsilon_i$, where $\epsilon_i$ is the ith error term, the Least-Squares estimates for the parameters $\alpha, \beta$ are: $$\hat \beta = \frac{\sum^{n}_{i=1}(x_i - \bar x)(y_i - \bar y)}{\sum^{n}_{i=1}(x_i - \bar x)^2}$$ $$\hat \alpha = \bar y - \hat \beta \bar x $$ Once we've calculated $\hat \alpha \& \hat \beta$ we can estimate the label corresponding to a newly observed feature, $x^*$ as: $$ \hat y = \hat \alpha + \hat \beta \times x^* $$ ### How did we arrive at these values? The short answer is: Multivariate Calculus! There are lots of existing resources online which do a good job of explaining how we derive the equations for linear regression - check out [this link](https://towardsdatascience.com/linear-regression-derivation-d362ea3884c2) if you want to read more. ``` class LinearRegression1D: def __init__(self, data, target, feature, trainTestRatio = 0.9): #data - a pandas dataset #target - the name of the pandas column which contains the true labels #feature - the name of the pandas column which we will use to do the regression #trainTestRatio - the proportion of the entire dataset which we'll use for training # - the rest will be used for testing self.target = target self.feature = feature #Split up data into a training and testing set self.train, self.test = train_test_split(data, test_size=1-trainTestRatio) def fitLR(self): #Estimate the model parameters (alpha and beta) from the training data pass def predict(self,x): #Given a vector of new observations x, predict the corresponding target values pass ``` ## The process should work as follows: 1. Create an instance of the class LinearRegression1D and pass it some data 2. Fit a model to the training data (the training and testing data should have been automatically created) by calling the fitLR method 3. Predict the target value for each observation in the testing data and see how you did. Alternatively you can plot the estimate line of best through the testing data for visual inspection ``` myModel = LinearRegression1D(data, 'Y', 'X') myModel.fitLR() #If this returns a zero, then it should have finished. If not we've got problems! 
``` ## Now let's see how we did! We can print the parameters we estimated for the gradient and intercept. Recall that the true values are saved as trueIntercept and trueGradient ``` print(f'The true intercept value was {trueIntercept}, we estimated the value to be {myModel.alphaHat}') print(f'The true gradient value was {trueGradient}, we estimated the value to be {myModel.betaHat}') ``` Not too bad! We didn't have a particularly large dataset to train on so to be in the right ballpark with the parameters is perfectly acceptable, given the variability within the data. A larger dataset would most likely give a more accurate estimation of the two values. ## Let's plot the line we have calculated against our data ``` x = np.arange(np.floor(data['X'].min()), np.ceil(data['X'].max())) plt.scatter(data['X'], data['Y'], label = 'Data') plt.plot(x, myModel.alphaHat + x*myModel.betaHat, label = 'Regression line') plt.legend() plt.show() ``` We can see that our regression line runs through the centre of the point cloud, indicating that the model fits nicely. ## As a final check, lets plot the residuals (the error for each prediction) and see what they look like. Hopefully they will be evenly scattered either side of 0 - if not, then our model is biased and our predictions will probably be consistently biased (perhaps as a result of overfitting) ``` testPred = myModel.predict(myModel.test[myModel.feature]) #What our models thinks the true values of y are, given x residuals = testPred - myModel.test[myModel.target] plt.scatter(list(range((residuals.shape[0]))), residuals) plt.ylabel('Residuals') plt.xlabel('index') plt.show() ```
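For reference, here is one possible way to fill in the `fitLR` and `predict` stubs above, applying the least-squares formulas from the Algebra section. This is an illustrative sketch written as standalone helpers (the names are ours, not the notebook's), not the intended solution.

```
import numpy as np

def fit_simple_ols(x, y):
    """Least-squares estimates (alpha_hat, beta_hat) for y = alpha + beta*x."""
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    x_bar, y_bar = x.mean(), y.mean()
    beta_hat = np.sum((x - x_bar) * (y - y_bar)) / np.sum((x - x_bar) ** 2)
    alpha_hat = y_bar - beta_hat * x_bar
    return alpha_hat, beta_hat

def predict_simple_ols(alpha_hat, beta_hat, x_new):
    """Predicted targets for new observations x_new."""
    return alpha_hat + beta_hat * np.asarray(x_new, dtype=float)

# e.g. inside fitLR:
#   self.alphaHat, self.betaHat = fit_simple_ols(self.train[self.feature],
#                                                self.train[self.target])
```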
# Modeling Process

Much like EDA, the ML process is very iterative and heuristic-based. With minimal knowledge of the problem or data at hand, it is difficult to know which ML method will perform best. This is known as the _no free lunch_ theorem for ML {cite}`wolpert1996lack`. Consequently, it is common for many ML approaches to be applied, evaluated, and modified before a final, optimal model can be determined. Performing this process correctly provides great confidence in our outcomes. If not, the results will be useless and, potentially, damaging [^fatml].

[^fatml]: See https://www.fatml.org/resources/relevant-scholarship for many discussions regarding implications of poorly applied and interpreted ML.

Approaching ML modeling correctly means approaching it strategically by spending our data wisely on learning and validation procedures, properly pre-processing the feature and target variables, minimizing data leakage, tuning hyperparameters, and assessing model performance. Many books and courses portray the modeling process as a short sprint. A better analogy would be a marathon where many iterations of these steps are repeated before eventually finding the final optimal model. This process is illustrated in the figure below.

![](../images/modeling_process.png)

# Learning objectives

Before introducing specific algorithms, this module and the next introduce concepts that are fundamental to the ML modeling process and that you will see briskly covered in future modules. By the end of this module you will be able to:

1. Split your data into training and test sets.
2. Instantiate, train, fit, and evaluate a basic model in Python.
3. Apply _k_-fold resampling and hyperparameter tuning procedures to improve the robustness and performance of a model.
4. Put these steps together for an end-to-end ML process.

# Prerequisites

This section leverages the following packages. We will demonstrate concepts on the Ames housing and employee attrition data sets.

```
# Helper packages
import math
import numpy as np
import pandas as pd
from plotnine import ggplot, aes, geom_density, geom_line, geom_point, ggtitle, themes

# Data
import modeldata

# Modeling process
from sklearn.utils import resample
from sklearn.model_selection import train_test_split, KFold, RepeatedKFold, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.metrics import mean_squared_error, roc_auc_score

# Data used
ames = modeldata.load_dataset('ames')
attrition = modeldata.load_dataset('attrition')
```

# Data splitting

A major goal of the machine learning process is to find an algorithm $f\left(X\right)$ that most accurately predicts future values ($\hat{Y}$) based on a set of features ($X$). In other words, we want an algorithm that not only fits well to our past data, but more importantly, one that predicts a future outcome accurately. This is called the ___generalizability___ of our algorithm. How we "spend" our data will help us understand how well our algorithm generalizes to unseen data.

To provide an accurate understanding of the generalizability of our final optimal model, we can split our data into training and test data sets:

* __Training set__: these data are used to develop feature sets, train our algorithms, tune hyperparameters, compare models, and all of the other activities required to choose a final model (e.g., the model we want to put into production).
* __Test set__: having chosen a final model, these data are used to estimate an unbiased assessment of the model’s performance, which we refer to as the _generalization error_.

![](../images/data_split.png)

```{warning}
It is critical that the test set not be used prior to selecting your final model. Assessing results on the test set prior to final model selection biases the model selection process since the testing data will have become part of the model development process.
```

Given a fixed amount of data, typical recommendations for splitting your data into training-test splits include 60% (training)--40% (testing), 70%--30%, or 80%--20%. Generally speaking, these are appropriate guidelines to follow; however, it is good to keep the following points in mind:

* Spending too much in training (e.g., $>80\%$) won't allow us to get a good assessment of predictive performance. We may find a model that fits the training data very well, but is not generalizable (_overfitting_).
* Sometimes too much spent in testing ($>40\%$) won't allow us to get a good assessment of model parameters.

Other factors should also influence the allocation proportions. For example, very large training sets (e.g., $n > 100\texttt{K}$) often result in only marginal gains compared to smaller sample sizes. Consequently, you may use a smaller training sample to increase computation speed (e.g., models built on larger training sets often take longer to score new data sets in production). In contrast, as $p \geq n$ (where $p$ represents the number of features), larger sample sizes are often required to identify consistent signals in the features.

The two most common ways of splitting data are ___simple random sampling___ and ___stratified sampling___.

## Simple random sampling

The simplest way to split the data into training and test sets is to take a simple random sample. This does not control for any data attributes, such as the distribution of your response variable ($Y$).

```{note}
Sampling is a random process so setting the random number generator with a common seed allows for reproducible results. Throughout this course we'll often use the seed `123` for reproducibility but the number itself has no special meaning.
```

```
# create train/test split
train, test = train_test_split(ames, train_size=0.7, random_state=123)

# dimensions of training data
train.shape
```

With sufficient sample size, this sampling approach will typically result in a similar distribution of $Y$ (e.g., `Sale_Price` in the `ames` data) between your <font color="blue">training</font> and <font color="red">test</font> sets, as illustrated below.

```
train['id'] = 'train'
test['id'] = 'test'

(ggplot(pd.concat([train, test]), aes('Sale_Price', color='id'))
 + geom_density()
 + ggtitle("Random sampling with Python"))
```

## Stratified sampling

If we want to explicitly control the sampling so that our training and test sets have similar $Y$ distributions, we can use stratified sampling. This is more common with classification problems where the response variable may be severely imbalanced (e.g., 90% of observations with response "Yes" and 10% with response "No"). However, we can also apply stratified sampling to regression problems for data sets that have a small sample size and where the response variable deviates strongly from normality. With a continuous response variable, stratified sampling will segment $Y$ into quantiles and randomly sample from each.
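For a continuous response like `Sale_Price`, one way to sketch this idea (an illustration only -- it assumes binning with pandas' `qcut`, and the `train_q`/`test_q` names are just for this example) is to stratify the split on quantile bins of the response:

```
# bin the continuous response into quartiles (illustrative choice of 4 bins)
price_bins = pd.qcut(ames['Sale_Price'], q=4, labels=False)

# stratify the 70/30 split on those bins
train_q, test_q = train_test_split(
    ames, train_size=0.7, random_state=123, stratify=price_bins
)

# quartile proportions should now be nearly identical in both sets
print(price_bins.loc[train_q.index].value_counts(normalize=True))
print(price_bins.loc[test_q.index].value_counts(normalize=True))
```

Each quartile then contributes roughly 25% of the observations to both the training and test sets.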
To perform stratified sampling in Python we simply apply the `stratify` argument in `train_test_split()`.

```
y = attrition["Attrition"]

train_strat, test_strat = train_test_split(
    attrition, train_size=0.7, random_state=123, stratify=y
)
```

The following illustrates that in our original employee attrition data we have an imbalanced response (No: 84%, Yes: 16%). By enforcing stratified sampling, both our training and testing sets have approximately equal response distributions.

```
for data_set in [attrition, train_strat, test_strat]:
    print(data_set["Attrition"].value_counts(normalize=True))
```

## Class imbalances

Imbalanced data can have a significant impact on model predictions and performance {cite}`apm`. Most often this involves classification problems where one class has a very small proportion of observations (e.g., defaults - 5% versus nondefaults - 95%). Several sampling methods have been developed to help remedy class imbalance and most of them can be categorized as either _up-sampling_ or _down-sampling_.

Down-sampling balances the dataset by reducing the size of the abundant class(es) to match the frequencies in the least prevalent class. This method is used when the quantity of data is sufficient. By keeping all samples in the rare class and randomly selecting an equal number of samples in the abundant class, a balanced new dataset can be retrieved for further modeling. Furthermore, the reduced sample size reduces the computation burden imposed by further steps in the ML process.

In contrast, up-sampling is used when the quantity of data is insufficient. It tries to balance the dataset by increasing the size of rarer samples. Rather than getting rid of abundant samples, new rare samples are generated by using repetition or bootstrapping.

```{note}
There is no absolute advantage of one sampling method over another. Application of these two methods depends on the use case it applies to and the data set itself. A combination of over- and under-sampling is often successful and a common approach is known as Synthetic Minority Over-Sampling Technique, or SMOTE {cite}`chawla2002smote`.
```

# Creating models

Throughout this book we will apply many different models so you should become quite comfortable with the process. The process of fitting a model is relatively simple and, in many cases (e.g., Scikit-learn), follows a very common pattern. In Python, we are often required to separate our features from our label into discrete data sets. For our first model we will simply use two features from our training data -- total square feet of the home (`Gr_Liv_Area`) and year built (`Year_Built`) -- to predict the sale price.

Scikit-learn has many modules for different model types. One module is [`sklearn.neighbors`](https://scikit-learn.org/stable/modules/neighbors.html), which contains various methods for unsupervised and supervised neighbors-based learning models. In our example, we are going to apply a K-nearest neighbor regression model since `Sale_Price` is a continuous response. We'll use `KNeighborsRegressor` to do so and in this example we'll simply fit our model to 10 neighbors.

```{note}
We will discuss K-nearest neighbor (KNN) models in detail in a later module but for now just consider we are trying to predict the price of a home based on the average price of the 10 other homes that seem to be most similar to it.
```

First we create the model object (`knn`) and then fit the model to our training data.
```
# separate features from labels
X_train = train[["Gr_Liv_Area", "Year_Built"]]
y_train = train["Sale_Price"]

# fit a KNN regression model with 10 neighbors
knn = KNeighborsRegressor(n_neighbors=10)
m1 = knn.fit(X_train, y_train)
m1
```

We have fit our model; if we want to see our predictions we can simply apply `predict()` and feed it the data set we want to make predictions on:

```
m1.predict(X_train)
```

# Evaluating models

It is important to understand how our model is performing. With ML models, measuring performance means understanding the predictive accuracy -- the difference between a predicted value and the actual value. We measure predictive accuracy with ___loss functions___.

There are many loss functions to choose from when assessing the performance of a predictive model, each providing a unique understanding of the predictive accuracy and differing between regression and classification models. Furthermore, the way a loss function is computed will tend to emphasize certain types of errors over others and can lead to drastic differences in how we interpret the “optimal model”. It's important to consider the problem context when identifying the preferred performance metric to use. And when comparing multiple models, we need to compare them across the same metric.

## Regression models

* __MSE__: Mean squared error is the average of the squared error ($MSE = \frac{1}{n} \sum^n_{i=1}(y_i - \hat y_i)^2$)[^mse_deviates]. The squared component results in larger errors having larger penalties. This (along with RMSE) is the most common error metric to use. __Objective: minimize__

[^mse_deviates]: This deviates slightly from the usual definition of MSE in ordinary linear regression, where we divide by $n-p$ (to adjust for bias) as opposed to $n$.

* __RMSE__: Root mean squared error. This simply takes the square root of the MSE metric ($RMSE = \sqrt{\frac{1}{n} \sum^n_{i=1}(y_i - \hat y_i)^2}$) so that your error is in the same units as your response variable. If your response variable units are dollars, the units of MSE are dollars-squared, but the RMSE will be in dollars. __Objective: minimize__

* __Deviance__: Short for mean residual deviance. In essence, it provides a degree to which a model explains the variation in a set of data when using maximum likelihood estimation. Essentially this compares a saturated model (i.e. fully featured model) to an unsaturated model (i.e. intercept only or average). If the response variable distribution is Gaussian, then it will be approximately equal to MSE. When not, it usually gives a more useful estimate of error. Deviance is often used with classification models.[^deviance] __Objective: minimize__

[^deviance]: See this StackExchange thread (http://bit.ly/what-is-deviance) for a good overview of deviance for different models and in the context of regression versus classification.

* __MAE__: Mean absolute error. Similar to MSE but rather than squaring, it just takes the mean absolute difference between the actual and predicted values ($MAE = \frac{1}{n} \sum^n_{i=1}(\vert y_i - \hat y_i \vert)$). This results in less emphasis on larger errors than MSE. __Objective: minimize__

* __RMSLE__: Root mean squared logarithmic error. Similar to RMSE but it performs a `log()` on the actual and predicted values prior to computing the difference ($RMSLE = \sqrt{\frac{1}{n} \sum^n_{i=1}(\log(y_i + 1) - \log(\hat y_i + 1))^2}$). When your response variable has a wide range of values, large response values with large errors can dominate the MSE/RMSE metric.
RMSLE minimizes this impact so that small response values with large errors can have just as meaningful an impact as large response values with large errors. __Objective: minimize__

* $R^2$: This is a popular metric that represents the proportion of the variance in the dependent variable that is predictable from the independent variable(s). Unfortunately, it has several limitations. For example, two models built from two different data sets could have the exact same RMSE but if one has less variability in the response variable then it would have a lower $R^2$ than the other. You should not place too much emphasis on this metric. __Objective: maximize__

Most models we assess in this book will report most, if not all, of these metrics. We will emphasize MSE and RMSE but it's important to realize that certain situations warrant emphasis on some metrics more than others.

The following illustrates how to compute the MSE and RMSE for our training set.

```
pred = m1.predict(X_train)

# compute MSE
mse = mean_squared_error(y_train, pred)
mse

# compute RMSE
mean_squared_error(y_train, pred, squared=False)
```

## Classification models

* __Misclassification__: This is the overall error. For example, say you are predicting 3 classes ( _high_, _medium_, _low_ ) and each class has 25, 30, 35 observations respectively (90 observations total). If you misclassify 3 observations of class _high_, 6 of class _medium_, and 4 of class _low_, then you misclassified 13 out of 90 observations, resulting in a misclassification rate of roughly 14%. __Objective: minimize__

* __Mean per class error__: This is the average error rate for each class. For the above example, this would be the mean of $\frac{3}{25}, \frac{6}{30}, \frac{4}{35}$, which is 14.5%. If your classes are balanced this will be identical to misclassification. __Objective: minimize__

* __MSE__: Mean squared error. Computes the distance from 1.0 to the probability suggested. So, say we have three classes, A, B, and C, and your model predicts a probability of 0.91 for A, 0.07 for B, and 0.02 for C. If the correct answer was A the $MSE = 0.09^2 = 0.0081$, if it is B $MSE = 0.93^2 = 0.8649$, if it is C $MSE = 0.98^2 = 0.9604$. The squared component results in large differences in probabilities for the true class having larger penalties. __Objective: minimize__

* __Cross-entropy (aka Log Loss or Deviance)__: Similar to MSE but it incorporates a log of the predicted probability multiplied by the true class. Consequently, this metric disproportionately punishes predictions where we predict a small probability for the true class, which is another way of saying having high confidence in the wrong answer is really bad. __Objective: minimize__

* __Gini index__: Mainly used with tree-based methods and commonly referred to as a measure of _purity_ where a small value indicates that a node contains predominantly observations from a single class. __Objective: minimize__

When applying classification models, we often use a _confusion matrix_ to evaluate certain performance measures. A confusion matrix is simply a matrix that compares actual categorical levels (or events) to the predicted categorical levels. When we predict the right level, we refer to this as a _true positive_. However, if we predict a level or event that did not happen this is called a _false positive_ (i.e. we predicted a customer would redeem a coupon and they did not). Alternatively, when we do not predict a level or event and it does happen, this is called a _false negative_ (i.e.
a customer that we did not predict to redeem a coupon does).

![](../images/confusion-matrix.png)

We can extract different levels of performance for binary classifiers. For example, given the classification (or confusion) matrix illustrated above we can assess the following:

* __Accuracy__: Overall, how often is the classifier correct? Opposite of misclassification above. Example: $\frac{TP + TN}{total} = \frac{100+50}{165} = 0.91$. __Objective: maximize__

* __Precision__: How accurately does the classifier predict events? This metric is concerned with maximizing the true positives to false positives ratio. In other words, for the number of predictions that we made, how many were correct? Example: $\frac{TP}{TP + FP} = \frac{100}{100+10} = 0.91$. __Objective: maximize__

* __Sensitivity (aka recall)__: How accurately does the classifier classify actual events? This metric is concerned with maximizing the true positives to false negatives ratio. In other words, for the events that occurred, how many did we predict? Example: $\frac{TP}{TP + FN} = \frac{100}{100+5} = 0.95$. __Objective: maximize__

* __Specificity__: How accurately does the classifier classify actual non-events? Example: $\frac{TN}{TN + FP} = \frac{50}{50+10} = 0.83$. __Objective: maximize__

![](../images/confusion-matrix2.png)

* __AUC__: Area under the curve. A good binary classifier will have high precision and sensitivity. This means the classifier does well when it predicts an event will and will not occur, which minimizes false positives and false negatives. To capture this balance, we often use a ROC curve that plots the false positive rate along the x-axis and the true positive rate along the y-axis. A line that is diagonal from the lower left corner to the upper right corner represents a random guess. The higher the line is in the upper left-hand corner, the better. AUC computes the area under this curve. __Objective: maximize__

![](../images/modeling-process-roc-1.png)

The following is an example of computing the AUC for a classification model developed on the Attrition data in Python. Do not be too concerned with understanding all the nuances. The main thing to note is that we follow the same general procedure of fitting our model, computing predicted values, and then comparing the predicted values to the actual values.

```
# convert response to binary ints
train_strat["Attrition"].replace(('Yes', 'No'), (1, 0), inplace=True)

# separate features from labels
X_train_strat = train_strat[["DistanceFromHome"]]
y_train_strat = np.array(train_strat["Attrition"])

# fit a KNN classification model with 10 neighbors
knn2 = KNeighborsClassifier(n_neighbors=10)
m2 = knn2.fit(X_train_strat, y_train_strat)

# make predictions
pred = m2.predict_proba(X_train_strat)

# compute AUC
roc_auc_score(y_train_strat, pred[:, 1])
```

# Resampling methods

In the data splitting section we split our data into training and testing sets. Furthermore, we were very explicit about the fact that we ___do not___ use the test set to assess model performance during the training phase. So how do we assess the generalization performance of the model?

One option is to assess an error metric based on the training data, as we demonstrated in the last section. Unfortunately, this leads to biased results as some models can perform very well on the training data but not generalize well to a new data set.
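As a small illustration of how optimistic training error can be, the sketch below (an illustration only -- it reuses the two-feature `X_train`/`y_train` objects and the `m1` model from above, and the exact numbers will depend on your split) compares the training-set RMSE of a 1-neighbor KNN with our 10-neighbor model. The 1-neighbor model essentially memorizes the training data, so its very low training error tells us little about how it would perform on new homes.

```
# a very flexible model: each training home is predicted largely by itself
knn1 = KNeighborsRegressor(n_neighbors=1).fit(X_train, y_train)

# training RMSE for the 1-neighbor and 10-neighbor models
rmse_k1 = mean_squared_error(y_train, knn1.predict(X_train), squared=False)
rmse_k10 = mean_squared_error(y_train, m1.predict(X_train), squared=False)

print(f'Training RMSE with k=1:  {rmse_k1:,.0f}')
print(f'Training RMSE with k=10: {rmse_k10:,.0f}')
```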
A second method is to use a _validation_ approach, which involves splitting the training set further to create two parts: a training set and a validation set (or _holdout set_). We can then train our model(s) on the new training set and estimate the performance on the validation set. Unfortunately, validation using a single holdout set can be highly variable and unreliable unless you are working with very large data sets {cite}`molinaro2005prediction, hawkins2003assessing`. As the size of your data set shrinks, this concern increases.

```{note}
Although we stick to our definitions of test, validation, and holdout sets, these terms are sometimes used interchangeably in other literature and software. What's important to remember is to always put a portion of the data under lock and key until a final model has been selected (we refer to this as the test data, but others refer to it as the holdout set).
```

___Resampling methods___ provide an alternative approach by allowing us to repeatedly fit a model of interest to parts of the training data and test its performance on other parts. The two most commonly used resampling methods are _k-fold cross validation_ and _bootstrap sampling_.

## _k_-fold cross validation

_k_-fold cross-validation (aka _k_-fold CV) is a resampling method that randomly divides the training data into _k_ groups (aka folds) of approximately equal size. The model is fit on $k-1$ folds and then the remaining fold is used to compute model performance. This procedure is repeated _k_ times; each time, a different fold is treated as the validation set. This process results in _k_ estimates of the generalization error (say $\epsilon_1, \epsilon_2, \dots, \epsilon_k$). Thus, the _k_-fold CV estimate is computed by averaging the _k_ test errors, providing us with an approximation of the error we might expect on unseen data.

![](../images/cv.png)

Consequently, with _k_-fold CV, every observation in the training data will be held out one time to be included in the validation fold, as illustrated in the figure below. In practice, one typically uses $k = 5$ or $k = 10$. There is no formal rule as to the size of _k_; however, as _k_ gets larger, the difference between the estimated performance and the true performance to be seen on the test set will decrease. On the other hand, using too large a _k_ can introduce computational burdens. Moreover, {cite}`molinaro2005prediction` found that $k=10$ performed similarly to leave-one-out cross validation (LOOCV), which is the most extreme approach (i.e., setting $k = n$).

The following is an illustration of 10-fold cross validation on 32 observations. Each observation is used once for validation and nine times for training.

![](../images/modeling-process-cv-1.png)

Although using $k \geq 10$ helps to minimize the variability in the estimated performance, _k_-fold CV still tends to have higher variability than bootstrapping. {cite}`kim2009estimating` showed that repeating _k_-fold CV can help to increase the precision of the estimated generalization error. Consequently, for smaller data sets (say $n < 10,000$), 10-fold CV repeated 5 or 10 times will improve the accuracy of your estimated performance and also provide an estimate of its variability.

In Python we use `KFold` and `RepeatedKFold` to create k-fold objects and then `cross_val_score` to train our model across all *k* folds and provide our loss score for each fold.
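If you want to see what a k-fold object actually produces before scoring a model with it, the quick sketch below may help (the `peek` name and the three-fold preview are just for illustration):

```
# peek at how KFold partitions the row indices of our training data
peek = KFold(n_splits=10, random_state=123, shuffle=True)

for fold, (train_idx, val_idx) in enumerate(peek.split(X_train)):
    if fold == 3:
        break
    print(f'Fold {fold}: {len(train_idx)} training rows, {len(val_idx)} validation rows')
```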
```{note}
The unified scoring API in scikit-learn always maximizes the score, so scores which need to be minimized are negated in order for the unified scoring API to work correctly. Consequently, you can just interpret the RMSE values below as $RMSE \times -1$.
```

```
# define loss function
loss = 'neg_root_mean_squared_error'

# create 10 fold CV object
kfold = KFold(n_splits=10, random_state=123, shuffle=True)

# fit model with 10-fold CV
results = cross_val_score(m1, X_train, y_train, cv=kfold, scoring=loss)
np.abs(results)

# summary stats for all 10 folds
pd.Series(np.abs(results)).describe()

# 10 fold cross validation repeated 5 times (total of 50 folds)
rkf = RepeatedKFold(n_splits=10, n_repeats=5, random_state=123)
results = cross_val_score(m1, X_train, y_train, cv=rkf, scoring=loss)
np.abs(results)

# average RMSE across all 50 folds
abs(results.mean())
```

## Bootstrap sampling

A bootstrap sample is a random sample of the data taken with replacement {cite}`esl`. This means that, after a data point is selected for inclusion in the subset, it’s still available for further selection. A bootstrap sample is the same size as the original data set from which it was constructed. The figure below provides a schematic of bootstrap sampling where each bootstrap sample contains 12 observations just as in the original data set. Furthermore, bootstrap sampling will contain approximately the same distribution of values (represented by colors) as the original data set.

![](../images/bootstrap-scheme.png)

Since samples are drawn with replacement, each bootstrap sample is likely to contain duplicate values. In fact, on average, $\approx 63.21\%$ of the original sample ends up in any particular bootstrap sample. The original observations not contained in a particular bootstrap sample are considered _out-of-bag_ (OOB). When bootstrapping, a model can be built on the selected samples and validated on the OOB samples; this is often done, for example, in random forests.

Since observations are replicated in bootstrapping, there tends to be less variability in the error measure compared with _k_-fold CV {cite}`efron1983estimating`. However, this can also increase the bias of your error estimate. This can be problematic with smaller data sets; however, for most average-to-large data sets (say $n \geq 1,000$) this concern is often negligible. The figure that follows compares bootstrapping to 10-fold CV on a small data set with $n = 32$ observations. A thorough introduction to bootstrapping is provided in {cite}`davison1997bootstrap`.

![](../images/modeling-process-sampling-comparison-1.png)

Although bootstrapping is not built into scikit-learn as easily as `KFold`, we can create bootstrap samples fairly easily with the `sklearn.utils.resample()` function, as illustrated in the code chunk below.
```
# Number of bootstrap samples to create
n_iterations = 10
results = list()

for i in range(n_iterations):
    # create bootstrap sample
    bs_sample = resample(X_train.index)
    bs_X_train = X_train.loc[bs_sample]
    bs_y_train = y_train.loc[bs_sample]

    # get non-selected (out-of-bag) observations
    non_selected_rows = list(set(X_train.index) - set(bs_sample))
    bs_X_test = X_train.loc[non_selected_rows]
    bs_y_test = y_train.loc[non_selected_rows]

    # fit model
    knn = KNeighborsRegressor(n_neighbors=10)
    bs_model = knn.fit(bs_X_train, bs_y_train)

    # evaluate model
    predictions = bs_model.predict(bs_X_test)
    rmse = mean_squared_error(bs_y_test, predictions, squared=False)

    # report & save results
    results.append(rmse)
    print(f'Bootstrap {i}: {round(rmse, 2)}')

# average bootstrap RMSE
np.mean(results)
```

Bootstrapping is, typically, more of an internal resampling procedure that is naturally built into certain ML algorithms. This will become more apparent in later chapters where we discuss bagging and random forests.

### Alternatives

It is important to note that there are other useful resampling procedures. If you're working with time-series specific data then you will want to incorporate rolling origin and other time series resampling procedures, which are also available in scikit-learn. Additionally, {cite}`efron1983estimating` developed the "632 method" and {cite}`efron1997improvements` discuss the "632+ method"; both approaches seek to minimize biases experienced with bootstrapping on smaller data sets.

# Bias variance trade-off

Prediction errors can be decomposed into two important subcomponents: error due to "bias" and error due to "variance". There is often a tradeoff between a model's ability to minimize bias and variance. Understanding how different sources of error lead to bias and variance helps us improve the data fitting process, resulting in more accurate models.

## Bias

_Bias_ is the difference between the expected (or average) prediction of our model and the correct value which we are trying to predict. It measures how far off in general a model's predictions are from the correct value, which provides a sense of how well a model can conform to the underlying structure of the data. The figure below illustrates an example where the polynomial model does not capture the underlying structure well. Linear models are classical examples of high bias models as they are less flexible and rarely capture non-linear, non-monotonic relationships.

We also need to think of bias-variance in relation to resampling. Models with high bias are rarely affected by the noise introduced by resampling. If a model has high bias, it will have consistency in its resampling performance as illustrated below:

![](../images/modeling-process-bias-model-1.png)

## Variance

On the other hand, error due to _variance_ is defined as the variability of a model prediction for a given data point. Many models (e.g., _k_-nearest neighbor, decision trees, gradient boosting machines) are very adaptable and offer extreme flexibility in the patterns that they can fit to. However, these models offer their own problems as they run the risk of overfitting to the training data. Although you may achieve very good performance on your training data, the model will not automatically generalize well to unseen data.

The following illustrates how a high variance k-nearest neighbor model fit to a single data set captures the underlying non-linear, non-monotonic data structure well but also overfits to individual data points (left).
Models fit to 25 bootstrapped replicates of the data are swayed by the noise in each replicate and generate highly variable predictions (right).

![](../images/modeling-process-variance-model-1.png)

Since high variance models are more prone to overfitting, using resampling procedures is critical to reduce this risk. Moreover, many algorithms that are capable of achieving high generalization performance have lots of _hyperparameters_ that control the level of model complexity (i.e., the tradeoff between bias and variance).

## Hyperparameter tuning

Hyperparameters (aka _tuning parameters_) are the "knobs to twiddle"[^twiddle] that control the complexity of machine learning algorithms and, therefore, the bias-variance trade-off. Not all algorithms have hyperparameters (e.g., ordinary least squares[^ols_hyper]); however, most have at least one.

[^twiddle]: This phrase comes from Brad Efron's comments in {cite}`breiman2001statistical`.

[^ols_hyper]: At least in the ordinary sense. You could think of polynomial regression as having a single hyperparameter, the degree of the polynomial.

The proper setting of these hyperparameters is often dependent on the data and problem at hand and cannot always be estimated by the training data alone. Consequently, we need a method of identifying the optimal setting. For example, in the high variance example in the previous section, we illustrated a high variance _k_-nearest neighbor model. _k_-nearest neighbor models have a single hyperparameter (_k_) that determines how the prediction is made: from the _k_ nearest observations in the training data to the one being predicted. If _k_ is small (e.g., $k=3$), the model will make a prediction for a given observation based on the average of the response values for the 3 observations in the training data most similar to the observation being predicted. This often results in highly variable predicted values because we are basing the prediction (in this case, an average) on a very small subset of the training data. As _k_ gets bigger, we base our predictions on an average of a larger subset of the training data, which naturally reduces the variance in our predicted values (remember this for later, averaging often helps to reduce variance!). The figure below illustrates this point. Smaller _k_ values (e.g., 2, 5, or 10) lead to high variance (but lower bias) and larger values (e.g., 150) lead to high bias (but lower variance). The optimal _k_ value might exist somewhere between 20--50, but how do we know which value of _k_ to use?

![](../images/modeling-process-knn-options-1.png)

One way to perform hyperparameter tuning is to fiddle with hyperparameters manually until you find a great combination of hyperparameter values that result in high predictive accuracy (as measured using _k_-fold CV, for instance). However, this can be very tedious work depending on the number of hyperparameters. An alternative approach is to perform a _grid search_. A grid search is an automated approach to searching across many combinations of hyperparameter values.

For the simple example above, a grid search would predefine a candidate set of values for _k_ (e.g., $k = 1, 2, \dots, j$) and perform a resampling method (e.g., _k_-fold CV) to estimate which _k_ value generalizes the best to unseen data. The plots in the below examples illustrate the results from a grid search to assess $k = 3, 5, \dots, 150$ using repeated 10-fold CV. The error rate displayed represents the average error for each value of _k_ across all the repeated CV folds.
On average, $k=46$ was the optimal hyperparameter value to minimize error (in this case, RMSE, which we discussed earlier) on unseen data.

![](../images/modeling-process-knn-tune-1.png)

Throughout this book you'll be exposed to different approaches to performing grid searches. In the above example, we used a _full cartesian grid search_, which assesses every combination of the hyperparameter values we manually define. However, as models get more complex and offer more hyperparameters, this approach can become computationally burdensome and requires you to be thoughtful about which hyperparameter values to include in the grid. Additional approaches we'll illustrate include _random grid searches_ {cite}`bergstra2012random`, which explore randomly selected hyperparameter values from a range of possible values; _early stopping_, which allows you to stop a grid search once improvements in the error become marginal; _sequential model-based optimization_ {cite}`bergstra2011algorithms`, which adaptively resamples candidate hyperparameter values based on approximately optimal performance; and more.

The following provides an example of a full cartesian grid search using `GridSearchCV()`, where we supply it a model object and the hyperparameter values we want to assess. You'll also notice that we supply it with the kfold object we created previously and the loss function we want to optimize for.

```
# basic model object
knn = KNeighborsRegressor()

# Create grid of hyperparameter values
hyper_grid = {'n_neighbors': range(2, 26)}

# Tune a knn model using grid search
grid_search = GridSearchCV(knn, hyper_grid, cv=kfold, scoring=loss)
results = grid_search.fit(X_train, y_train)

# Best model's cross validated RMSE
abs(results.best_score_)

# Best model's k value
results.best_estimator_.get_params().get('n_neighbors')

# Plot all RMSE results
all_rmse = pd.DataFrame({
    'k': range(2, 26),
    'RMSE': np.abs(results.cv_results_['mean_test_score'])
})

(ggplot(all_rmse, aes(x='k', y='RMSE'))
 + geom_line()
 + geom_point()
 + ggtitle("Cross validated grid search results"))
```

# Putting the processes together

You've now been exposed to many of the fundamental pieces of an ML process. The following combines these code snippets into a larger recipe to show you how they all come together. Rather than just look at the 2 features that we included thus far (`Gr_Liv_Area` & `Year_Built`), we'll include all numeric features.

```{note}
To include the categorical features as well we will need to do some feature engineering, which we will discuss in the next module.
``` ``` # create train/test split train, test = train_test_split(ames, train_size=0.7, random_state=123) # separate features from labels and only use numeric features X_train = train.select_dtypes(include='number').drop("Sale_Price", axis=1) y_train = train["Sale_Price"] # create KNN model object knn = KNeighborsRegressor() # define loss function loss = 'neg_root_mean_squared_error' # create 10 fold CV object kfold = KFold(n_splits=10, random_state=123, shuffle=True) # Create grid of hyperparameter values hyper_grid = {'n_neighbors': range(2, 26)} # Tune a knn model using grid search grid_search = GridSearchCV(knn, hyper_grid, cv=kfold, scoring=loss) results = grid_search.fit(X_train, y_train) # Best model's cross validated RMSE abs(results.best_score_) # Best model's k value results.best_estimator_.get_params().get('n_neighbors') # Plot all RMSE results all_rmse = pd.DataFrame({ 'k': range(2, 26), 'RMSE': np.abs(results.cv_results_['mean_test_score']) }) (ggplot(all_rmse, aes(x='k', y='RMSE')) + geom_line() + geom_point() + ggtitle("Cross validated grid search results")) ``` # References ```{bibliography} ```
Rather than just look at the 2 features that we included thus far (`Gr_Liv_Area` & `Year_Built`), we'll include all numeric features. # References
0.905207
0.992711
# Regex ``` import re # regex string: for any alphanumeric character, and greedy "+" word = r'\w+' sentence = "I am Sam; Sam I am." ``` Let's find all words in the sentence using regex ``` re.findall(word, sentence) ``` ## search & match ``` sresult = re.search(word, sentence) sresult.group() mresult = re.match(word, sentence) mresult.group() capitalized_word = r'[A-Z]\w+' sresult = re.search(capitalized_word, sentence) sresult sresult.group() mresult = re.match(capitalized_word, sentence) mresult ``` Nothing is returned! It is NULL -- and we will get an error if we try to call the group method on a NULL object. The reason is that re.match is anchored at the beginning of the string. As re.match documentation says: > If zero or more characters at the beginning of string match the regular > expression pattern, return a corresponding MatchObject instance. Return > None if the string does not match the pattern; note that this is > different from a zero-length match. Note: If you want to locate a match anywhere in string, use search() instead. re.search is more broad : is the word **anywhere** in the sentence? re.match is faster but more specific : does the sentence **begin with** the word? ### numbers ``` # digits only numbers = r'\d+' nasa_briefing = '''This Saturday at 5:51 a.m. PDT, NASA's Juno \ spacecraft will get closer to the cloud tops of Jupiter than \ at any other time during its prime mission. At the moment of \ closest approach, Juno will be about 2,600 miles (4,200 kilometers) \ above Jupiter's swirling clouds and traveling at 130,000 mph \ (208,000 kilometers per hour) with respect to the planet. ''' re.findall(numbers, nasa_briefing) ``` This does not seem quite right: as all the "," and ":" separated out numbers which are related. Let's fix the issue with the thousands ",": ``` numbers = r'(\d+,\d+|\d+)' re.findall(numbers, nasa_briefing) ``` Alternatively, we can use a different regex to accomplish the same thing: ``` numbers = r'(\d*,?\d+)' re.findall(numbers, nasa_briefing) ``` Better result -- but our time representation still need fixing: ``` numbers = r'(\d*,?:?\d+)' re.findall(numbers, nasa_briefing) ``` ### named groups ``` city_state = r'(?P<city>[\w\s]+), (?P<state>[A-Z]{2})' sentence = "1600 Amphitheatre Pkwy, Mountain View, CA 94043" re.findall(city_state, sentence) for city_st in re.finditer(city_state, sentence): print("city: {}".format(city_st.group('city'))) print("state: {}".format(city_st.group('state'))) ```
github_jupyter
import re # regex string: for any alphanumeric character, and greedy "+" word = r'\w+' sentence = "I am Sam; Sam I am." re.findall(word, sentence) sresult = re.search(word, sentence) sresult.group() mresult = re.match(word, sentence) mresult.group() capitalized_word = r'[A-Z]\w+' sresult = re.search(capitalized_word, sentence) sresult sresult.group() mresult = re.match(capitalized_word, sentence) mresult # digits only numbers = r'\d+' nasa_briefing = '''This Saturday at 5:51 a.m. PDT, NASA's Juno \ spacecraft will get closer to the cloud tops of Jupiter than \ at any other time during its prime mission. At the moment of \ closest approach, Juno will be about 2,600 miles (4,200 kilometers) \ above Jupiter's swirling clouds and traveling at 130,000 mph \ (208,000 kilometers per hour) with respect to the planet. ''' re.findall(numbers, nasa_briefing) numbers = r'(\d+,\d+|\d+)' re.findall(numbers, nasa_briefing) numbers = r'(\d*,?\d+)' re.findall(numbers, nasa_briefing) numbers = r'(\d*,?:?\d+)' re.findall(numbers, nasa_briefing) city_state = r'(?P<city>[\w\s]+), (?P<state>[A-Z]{2})' sentence = "1600 Amphitheatre Pkwy, Mountain View, CA 94043" re.findall(city_state, sentence) for city_st in re.finditer(city_state, sentence): print("city: {}".format(city_st.group('city'))) print("state: {}".format(city_st.group('state')))
0.283881
0.77373
``` # %matplotlib inline import pandas as pd ``` ### Download the data and load it to Pandas. You can find them [here](https://drive.google.com/file/d/1NY6cmF9Shjw-dD7BD6bNmfcIVz-kQcFR/view?usp=sharing). ``` titles = pd.read_csv('data/titles.csv') titles.head() ``` ### How many movies are listed in the titles dataframe? ``` titles['title'].value_counts().sum() ``` ### What are the earliest two films listed in the titles dataframe? ``` titles.sort_values(by=['year']).head(2) ``` ### How many movies have the title "Hamlet"? ``` filter_hamlet = (titles['title'] == 'Hamlet') titles[filter_hamlet]['title'].count() ``` ### How many movies are titled "North by Northwest"? ``` filter_north = (titles['title'] == 'North by Northwest') titles[filter_north]['title'].count() ``` ### When was the first movie titled "Hamlet" made? ``` titles[filter_hamlet].sort_values(by='year').head(1) ``` ### List all of the "Treasure Island" movies from earliest to most recent. ``` filter_treasureisland = (titles['title'] == 'Treasure Island') titles[filter_treasureisland].sort_values(by='year') ``` ### How many movies were made in the year 1950? ``` filter_1950 = (titles['year'] == 1950) titles[filter_1950]['title'].count() ``` ### How many movies were made in the year 1960? ``` filter_1960 = (titles['year'] == 1960) titles[filter_1960]['title'].count() ``` ### How many movies were made from 1950 through 1959? ``` filter_50to59 = (titles['year'] >= 1950) & (titles['year'] <= 1959) titles[filter_50to59]['title'].count() ``` ### In what years has a movie titled "Batman" been released? ``` filter_batman = titles['title'] == 'Batman' titles[filter_batman]['year'] ``` ### How many roles were there in the movie "Inception"? ``` cast = pd.read_csv('data/cast.csv', sep=",") cast.head() filter_inception = cast['title'] == 'Inception' cast[filter_inception]['type'].count() ``` ### How many roles in the movie "Inception" are NOT ranked by an "n" value? ``` filter_inception = (cast['title'] == 'Inception') & (cast['n'].isnull()) cast[filter_inception]['type'].count() ``` ### But how many roles in the movie "Inception" did receive an "n" value? ``` filter_inception = (cast['title'] == 'Inception') & (cast['n'] > 0) cast[filter_inception]['type'].count() ``` ### Display the cast of "North by Northwest" in their correct "n"-value order, ignoring roles that did not earn a numeric "n" value. ``` filter_north = (cast['title'] == 'North by Northwest') & (cast['n'] > 0) cast[filter_north].sort_values(by='n') ``` ### Display the entire cast, in "n"-order, of the 1972 film "Sleuth". ``` filter_sleuth = (cast['title'] == 'Sleuth') & (cast['year'] == 1972) & (cast['n'] > 0) cast[filter_sleuth].sort_values(by='n') ``` ### Now display the entire cast, in "n"-order, of the 2007 version of "Sleuth". ``` filter_sleuth = (cast['title'] == 'Sleuth') & (cast['year'] == 2007) & (cast['n'] > 0) cast[filter_sleuth].sort_values(by='n') ``` ### How many roles were credited in the silent 1921 version of Hamlet? ``` filter_hamlet = (cast['title'] == 'Hamlet') & (cast['year'] == 1921) & (cast['n'] > 0) cast[filter_hamlet]['type'].count() ``` ### How many roles were credited in Branagh’s 1996 Hamlet? ``` filter_hamlet = (cast['title'] == 'Hamlet') & (cast['year'] == 1996) & (cast['n'] > 0) cast[filter_hamlet]['type'].count() ``` ### How many "Hamlet" roles have been listed in all film credits through history? 
``` filter_hamlet = (cast['title'] == 'Hamlet') & (cast['n'] > 0) cast[filter_hamlet]['type'].count() ``` ### How many people have played an "Ophelia"? ``` filter_ophelia = cast['character'] == 'Ophelia' cast[filter_ophelia].count() ``` ### How many people have played a role called "The Dude"? ``` filter_dude = cast['character'] == 'The Dude' cast[filter_dude].count() ``` ### How many people have played a role called "The Stranger"? ``` filter_stranger = cast['character'] == 'The Stanger' cast[filter_stranger].count() ``` ### How many roles has Sidney Poitier played throughout his career? ``` filter_poitier = cast['name'] == 'Sidney Poitier' cast[filter_poitier]['type'].count() ``` ### How many roles has Judi Dench played? ``` filter_dench = cast['name'] == 'Judi Dench' cast[filter_dench]['type'].count() ``` ### List the supporting roles (having n=2) played by Cary Grant in the 1940s, in order by year. ``` filter_grant = (cast['name'] == 'Cary Grant') & (cast['year'] >= 1940) & (cast['year'] <= 1949) & (cast['n'] == 2) cast[filter_grant].sort_values(by='year') ``` ### List the leading roles that Cary Grant played in the 1940s in order by year. ``` filter_grant = (cast['name'] == 'Cary Grant') & (cast['year'] >= 1940) & (cast['year'] <= 1949) & (cast['n'] == 1) cast[filter_grant].sort_values(by='year') ``` ### How many roles were available for actors in the 1950s? ``` filter_actors = (cast['type'] == 'actor') & (cast['year'] >= 1950) & (cast['year'] <= 1959) cast[filter_actors].count() ``` ### How many roles were avilable for actresses in the 1950s? ``` filter_actress = (cast['type'] == 'actress') & (cast['year'] >= 1950) & (cast['year'] <= 1959) cast[filter_actress].count() ``` ### How many leading roles (n=1) were available from the beginning of film history through 1980? ``` filter_lead = (cast['n'] == 1) & (cast['year'] <= 1980) cast[filter_lead]['n'].count() ``` ### How many non-leading roles were available through from the beginning of film history through 1980? ``` filter_nonlead = (cast['n'] > 1) & (cast['year'] <= 1980) cast[filter_nonlead]['n'].count() ``` ### How many roles through 1980 were minor enough that they did not warrant a numeric "n" rank? ``` filter_norank = (cast['n'].isnull()) & (cast['year'] <= 1980) cast[filter_norank]['character'].count() ```
github_jupyter
# %matplotlib inline import pandas as pd titles = pd.read_csv('data/titles.csv') titles.head() titles['title'].value_counts().sum() titles.sort_values(by=['year']).head(2) filter_hamlet = (titles['title'] == 'Hamlet') titles[filter_hamlet]['title'].count() filter_north = (titles['title'] == 'North by Northwest') titles[filter_north]['title'].count() titles[filter_hamlet].sort_values(by='year').head(1) filter_treasureisland = (titles['title'] == 'Treasure Island') titles[filter_treasureisland].sort_values(by='year') filter_1950 = (titles['year'] == 1950) titles[filter_1950]['title'].count() filter_1960 = (titles['year'] == 1960) titles[filter_1960]['title'].count() filter_50to59 = (titles['year'] >= 1950) & (titles['year'] <= 1959) titles[filter_50to59]['title'].count() filter_batman = titles['title'] == 'Batman' titles[filter_batman]['year'] cast = pd.read_csv('data/cast.csv', sep=",") cast.head() filter_inception = cast['title'] == 'Inception' cast[filter_inception]['type'].count() filter_inception = (cast['title'] == 'Inception') & (cast['n'].isnull()) cast[filter_inception]['type'].count() filter_inception = (cast['title'] == 'Inception') & (cast['n'] > 0) cast[filter_inception]['type'].count() filter_north = (cast['title'] == 'North by Northwest') & (cast['n'] > 0) cast[filter_north].sort_values(by='n') filter_sleuth = (cast['title'] == 'Sleuth') & (cast['year'] == 1972) & (cast['n'] > 0) cast[filter_sleuth].sort_values(by='n') filter_sleuth = (cast['title'] == 'Sleuth') & (cast['year'] == 2007) & (cast['n'] > 0) cast[filter_sleuth].sort_values(by='n') filter_hamlet = (cast['title'] == 'Hamlet') & (cast['year'] == 1921) & (cast['n'] > 0) cast[filter_hamlet]['type'].count() filter_hamlet = (cast['title'] == 'Hamlet') & (cast['year'] == 1996) & (cast['n'] > 0) cast[filter_hamlet]['type'].count() filter_hamlet = (cast['title'] == 'Hamlet') & (cast['n'] > 0) cast[filter_hamlet]['type'].count() filter_ophelia = cast['character'] == 'Ophelia' cast[filter_ophelia].count() filter_dude = cast['character'] == 'The Dude' cast[filter_dude].count() filter_stranger = cast['character'] == 'The Stanger' cast[filter_stranger].count() filter_poitier = cast['name'] == 'Sidney Poitier' cast[filter_poitier]['type'].count() filter_dench = cast['name'] == 'Judi Dench' cast[filter_dench]['type'].count() filter_grant = (cast['name'] == 'Cary Grant') & (cast['year'] >= 1940) & (cast['year'] <= 1949) & (cast['n'] == 2) cast[filter_grant].sort_values(by='year') filter_grant = (cast['name'] == 'Cary Grant') & (cast['year'] >= 1940) & (cast['year'] <= 1949) & (cast['n'] == 1) cast[filter_grant].sort_values(by='year') filter_actors = (cast['type'] == 'actor') & (cast['year'] >= 1950) & (cast['year'] <= 1959) cast[filter_actors].count() filter_actress = (cast['type'] == 'actress') & (cast['year'] >= 1950) & (cast['year'] <= 1959) cast[filter_actress].count() filter_lead = (cast['n'] == 1) & (cast['year'] <= 1980) cast[filter_lead]['n'].count() filter_nonlead = (cast['n'] > 1) & (cast['year'] <= 1980) cast[filter_nonlead]['n'].count() filter_norank = (cast['n'].isnull()) & (cast['year'] <= 1980) cast[filter_norank]['character'].count()
0.218669
0.877214
# CAR PURCHASING DOLLAR AMOUNT USING ANNs # PROBLEM STATEMENT You are working as a car salesman and you would like to develop a model to predict the total dollar amount that customers are willing to pay given the following attributes: - Customer Name - Customer e-mail - Country - Gender - Age - Annual Salary - Credit Card Debt - Net Worth The model should predict: - Car Purchase Amount # STEP #0: LIBRARIES IMPORT ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns ``` # STEP #1: IMPORT DATASET ``` car_df = pd.read_csv('Car_Purchasing_Data.csv', encoding='ISO-8859-1') car_df ``` # STEP #2: VISUALIZE DATASET ``` sns.pairplot(car_df) ``` # STEP #3: CREATE TESTING AND TRAINING DATASET/DATA CLEANING ``` X = car_df.drop(['Customer Name', 'Customer e-mail', 'Country', 'Car Purchase Amount'], axis = 1) X y = car_df['Car Purchase Amount'] y.shape from sklearn.preprocessing import MinMaxScaler scaler_x = MinMaxScaler() X_scaled = scaler_x.fit_transform(X) scaler_x.data_max_ scaler_x.data_min_ print(X_scaled) X_scaled.shape y.shape y = y.values.reshape(-1,1) y.shape y scaler_y = MinMaxScaler() y_scaled = scaler_y.fit_transform(y) y_scaled ``` # STEP#4: TRAINING THE MODEL ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size = 0.25) import tensorflow.keras from keras.models import Sequential from keras.layers import Dense from sklearn.preprocessing import MinMaxScaler model = Sequential() model.add(Dense(25, input_dim=5, activation='relu')) model.add(Dense(25, activation='relu')) model.add(Dense(1, activation='linear')) model.summary() model.compile(optimizer='adam', loss='mean_squared_error') epochs_hist = model.fit(X_train, y_train, epochs=20, batch_size=25, verbose=1, validation_split=0.2) ``` # STEP#5: EVALUATING THE MODEL ``` print(epochs_hist.history.keys()) plt.plot(epochs_hist.history['loss']) plt.plot(epochs_hist.history['val_loss']) plt.title('Model Loss Progression During Training/Validation') plt.ylabel('Training and Validation Losses') plt.xlabel('Epoch Number') plt.legend(['Training Loss', 'Validation Loss']) # Gender, Age, Annual Salary, Credit Card Debt, Net Worth # ***(Note that input data must be normalized)*** X_test_sample = np.array([[0, 0.4370344, 0.53515116, 0.57836085, 0.22342985]]) #X_test_sample = np.array([[1, 0.53462305, 0.51713347, 0.46690159, 0.45198622]]) y_predict_sample = model.predict(X_test_sample) print('Expected Purchase Amount=', y_predict_sample) y_predict_sample_orig = scaler_y.inverse_transform(y_predict_sample) print('Expected Purchase Amount=', y_predict_sample_orig) ```
github_jupyter
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns car_df = pd.read_csv('Car_Purchasing_Data.csv', encoding='ISO-8859-1') car_df sns.pairplot(car_df) X = car_df.drop(['Customer Name', 'Customer e-mail', 'Country', 'Car Purchase Amount'], axis = 1) X y = car_df['Car Purchase Amount'] y.shape from sklearn.preprocessing import MinMaxScaler scaler_x = MinMaxScaler() X_scaled = scaler_x.fit_transform(X) scaler_x.data_max_ scaler_x.data_min_ print(X_scaled) X_scaled.shape y.shape y = y.values.reshape(-1,1) y.shape y scaler_y = MinMaxScaler() y_scaled = scaler_y.fit_transform(y) y_scaled from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled, test_size = 0.25) import tensorflow.keras from keras.models import Sequential from keras.layers import Dense from sklearn.preprocessing import MinMaxScaler model = Sequential() model.add(Dense(25, input_dim=5, activation='relu')) model.add(Dense(25, activation='relu')) model.add(Dense(1, activation='linear')) model.summary() model.compile(optimizer='adam', loss='mean_squared_error') epochs_hist = model.fit(X_train, y_train, epochs=20, batch_size=25, verbose=1, validation_split=0.2) print(epochs_hist.history.keys()) plt.plot(epochs_hist.history['loss']) plt.plot(epochs_hist.history['val_loss']) plt.title('Model Loss Progression During Training/Validation') plt.ylabel('Training and Validation Losses') plt.xlabel('Epoch Number') plt.legend(['Training Loss', 'Validation Loss']) # Gender, Age, Annual Salary, Credit Card Debt, Net Worth # ***(Note that input data must be normalized)*** X_test_sample = np.array([[0, 0.4370344, 0.53515116, 0.57836085, 0.22342985]]) #X_test_sample = np.array([[1, 0.53462305, 0.51713347, 0.46690159, 0.45198622]]) y_predict_sample = model.predict(X_test_sample) print('Expected Purchase Amount=', y_predict_sample) y_predict_sample_orig = scaler_y.inverse_transform(y_predict_sample) print('Expected Purchase Amount=', y_predict_sample_orig)
0.730001
0.817319
<center><br> ## Открытый курс по машинному обучению. Сессия № 3 <img src="https://article.images.consumerreports.org/c_lfill,w_1920,ar_32:11/prod/content/dam/CRO%20Images%202017/Cars/March/CR-Cars-Hero-Used-Car-Sales-03-17"> ### <center> Автор материала: Юлия Климушина ## <center> Прогноз цен на подержанные автомобили </center> ### <center> Индивидуальный проект по анализу данных В этом проекте мы будем решать задачу восстановления регрессии. Данные, используемые в этом проекте, можно скачать [тут](https://www.kaggle.com/orgesleka/used-cars-database). Датасет содержит информацию о подержанных автомобилях, выставленных на продажу на Ebay в марте-апреле 2016. Данные представлены на немецком языке. Цель исследования: создание модели, предсказывающую цену автомобиля на вторичном рынке. Такая модель может помочь: - владельцу авто, желающему продать своего железого коня, не продешевить; - покупателю не переплатить; - диллеру, занимающемуся перепродажей машин, определить насколько выгодно конкретное предложение, за какую цену можно перепродать автомобиль. ``` import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt %matplotlib inline from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import cross_val_score, train_test_split from sklearn.model_selection import GridSearchCV, learning_curve, validation_curve, KFold from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from xgboost import XGBRegressor from scipy.sparse import csr_matrix, hstack import warnings warnings.filterwarnings('ignore') RANDOM_SEED = 17 kf = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) ``` ### 1. Описание набора данных и признаков #### Список признаков: - **dateCrawled**: дата и время первого просмотра объявления - **name** : название машины (сформировано из названия марки, модели и другой информации) - **seller** : кто продает ('privat' - частное лицо, 'gewerblich' - диллер) - **offerType** : тип предложения ('Angebot' - продажа, 'Gesuch' - покупка) - **price** : цена - **abtest** : A/B тест. Покопавшись в интернете, я выяснила, что A/B тестирование - это тактика, с помощью которой макетологи выясняют, какие заголовки объявления, текст, изображения, призывы к действию будут лучше работать для целевой аудитории. - **vehicleType** : тип авто ('coupe', 'suv', 'kleinwagen', 'limousine', 'cabrio', 'bus', 'kombi', 'andere' - "купе", "внедорожник", "миниавто", "седан", "кабриолет", "автобус", "комби", "другое", соответственно) - **yearOfRegistration** : в каком году машина была впервые зарегистрирована - **gearbox** : тип коробки передач ('manuell' - ручная, 'automatik' - автоматическая) - **powerPS** : мощность - **model** : модель - **kilometer** : пробег в километрах - **monthOfRegistration** : в каком месяце машина была впервые зарегистрирована - **fuelType** : тип топлива - **brand** : марка - **notRepairedDamage** : есть ли повреждения, требующие ремонта ('ja' - да, 'nein' - нет) - **dateCreated** : дата и время создания объявления на eBay - **nrOfPictures** : количество фотографий автомобиля (к сожалению, это поле везде содержит нули и поэтому бесполезно) - **postalCode** : почтовый индекс - **lastSeenOnline** : дата и время последнего просмотра объявления Целевая переменная: **price** - цена автомобиля. 
Перед нами стоит задача восстановления регрессии. ### 2. Первичный анализ и обработка признаков Посмотрим на данные ``` parse_dates = ['dateCrawled', 'dateCreated', 'lastSeen'] df = pd.read_csv('data/autos.csv', sep=',', parse_dates=parse_dates, encoding = 'ISO-8859-1') df.head() df.shape df.info() ``` Давайте избавимся от пропусков и не информативных признаков. **abtest** явно лишний признак, так как не имеет отношения к автомобилям как таковым. Насколько мне удалось выяснить, это некий показатель, используемый Ebay для определения эффективности рекламы. Меня интересуют только объявления о продаже, поэтому я удалю строки о покупке и признак **offerType**.<br> Признак **vehicleType** содержит пропуски. Можно заменить их значением, обозначающим "другое" (andere)<br> Строки с пропусками в **gearbox** удалим. ``` df.drop([ 'abtest'], axis=1, inplace=True) df = df[df['offerType'] != 'Gesuch'].drop('offerType', axis=1) df['vehicleType'].fillna(axis=0, value='andere', inplace=True) df.dropna(axis=0, subset=['gearbox'], inplace=True) ``` Давайте посмотрим на признак **brand**. ``` df['brand'].unique() ``` "sonstige_autos" означает "прочие автомобили". Строк с такими значения немного и анализ показывает, что это старые и/или редкие машины, информации по которым не достаточно, чтобы строить прогноз, к тому же поле **model** у них не заполнено, поэтому удалим такие строки. В признаке **model** 13433 пропусков, зато **brand** всегда заполнен. Учитывая, что **name** часто содержит в себе информацию о марке и модели, достанем модель оттуда. Строки, которые не подойдут под алгоритм, удалим. После этой операции удалим переменную **name**, он нам больше не пригодится. ``` df = df[df['brand'] != "sonstige_autos"] model_nan_idx = df[pd.isnull(df["model"])].index def model_extractor(x): x = x.apply(str.lower) name = x['name'].split(sep='_') try: if name[0] == x["brand"]: return name[1] else: return np.nan except: return np.nan df.loc[model_nan_idx, 'model'] = df.loc[model_nan_idx][['name', 'brand']].apply(model_extractor, axis=1) df.dropna(axis=0, subset=['model'], inplace=True) df.drop('name', axis=1, inplace=True) ``` Разберемся с топливом **fuelType**. Заменим пустующие значения 'andere' ('другое'). ``` df['fuelType'].value_counts(dropna=False) df['fuelType'].fillna(axis=0, value='andere', inplace=True) ``` Признак **notRepairedDamage** имеет 56335 пропусков. Можно исходить из предположения, что если владелец не упомянул в объявлении про повреждения, то он продает её как не требующую ремонта. Пометим такие пропуски как 'nein' и приведем к бинарному формату. ``` df['notRepairedDamage'].fillna(value='nein', inplace=True) df['notRepairedDamage'] = df['notRepairedDamage'].map({'ja': 1, 'nein': 0}).astype('int64') ``` Посмотрим на статистику. ``` df.describe(include='all').T ``` Основные выводы: **nrOfPictures** по нулям. Удаляем этот признак. в **price** наблюдаются большие выбросы (10 в восьмой многовато даже для Bloodhound SSC) **kilometer** - имеет скошенное влево распределение ``` df.drop('nrOfPictures', axis=1, inplace=True) ``` ### 3. Визуальный анализ признаков. Особенности данных. Посмотрим на разброс значений количественных признаков: цены, года первой регистации и мощности. ``` feats = ['price', 'yearOfRegistration', 'powerPS'] fig, axes = plt.subplots(ncols=len(feats), nrows=1, figsize=(18,6)) for i, feat in enumerate(feats): sns.boxplot(df[feat], ax=axes[i], orient='v', width=0.5, color='g'); axes[i].set_ylabel('') axes[i].set_title(feat) ``` Из-за выбросов ничего не разобрать. 
Начнем с цен, определим пороги отсечения выбросов. Просмотр объявлений о продаже показали, что старая машина (15-20 лет) может стоить в районе 100 евро. В качестве верхней границы возьмем 150000. Именно столько стоят Porsche, которых не так уж мало в наборе. ``` df = df[(df['price'] >= 100) & (df['price'] <= 150000)] ``` Посмотрим на год первой регистрации. Это важный признак, т.к. возраст автомобиля один из ключевых факторов, влияющих на его цену. Очевидно, что год регистрации не может быть позднее, чем год размешения объявлений. Объявления размещены в марте и апреле 2016, поэтому 2016-й год также не будем рассматривать. 17141 автомобилей зарегистрированы в 2016 и позднее. Удалим эти строки и машины старее 1976 года, то есть оставляем период в 40 лет. ``` df = df[(df['yearOfRegistration'] >= 1976) & (df['yearOfRegistration'] < 2016)] ``` В выборке есть достаточно автомобилей марки Porsche, мощность двигателей которых может превышать 500 л.с. Также есть авто марки Fiat с мощность движка не превышающим 30 лошадок. Возьмем ннтервал допустимых значений (20, 600). И построим ящики с усами. ``` df = df[(df['powerPS'] > 20) & (df['powerPS'] < 600)] from matplotlib.ticker import FuncFormatter feats = ['price', 'yearOfRegistration', 'powerPS'] fig, axis = plt.subplots(ncols=3, figsize=(18, 6)) for i, feat in enumerate(feats): sns.boxplot(np.log(df[feat]), ax=axis[i], orient='v', width=0.5, color='g'); y_formatter = FuncFormatter(lambda x, pos: ('%i')%(np.exp(x))) axis[i].yaxis.set_major_formatter(y_formatter) ``` Признак **monthOfRegistration** целочисленный, но для модели не имеет смысла сравнивать январь с сентябрем, поэтому переквалифицируем его в категориальный. Месяц 0 будем рассматривать, как "не определено". Как видно из гистограммы с марта по июль публикуется больше объявлений о продаже авто, чем в остальные месяцы. ``` df['monthOfRegistration'] = df['monthOfRegistration'].astype('object') plt.figure(figsize=(10, 5)) sns.countplot(df['monthOfRegistration']); ``` Посмотрим на распределение количественных и бинарных признаков. ``` df.hist(figsize=(15, 10)); print('Доля машин с пробегом 150000 км: ', df[df['kilometer'] == 150000].shape[0] / df.shape[0]) ``` Обратим внимание на пробег. Медиана и максимум = 150000. 65% машин имеют пробег 150000 км и мы имеем скошенное распределение (длинный левый хвост). У распределений целевой переменной **price** и мощности **powerPS** видим длинный правый хвост. Попробуем преобразовать данные, с тем, чтобы приблизить их распределения к нормальному. Для этого пробег возведем в степень, а **price** и **powerPS** - логарифмируем. ``` fig, axes = plt.subplots(ncols=3, figsize=(15,5)) (df['kilometer'] ** 2).hist(ax=axes[0]); np.log1p(df['price']).hist(ax=axes[1]); np.log1p(df['powerPS']).hist(ax=axes[2]); ``` Изобразим матрицу корреляции ``` pal = sns.light_palette("green", as_cmap=True) fig, ax = plt.subplots(figsize=(12,8)) sns.heatmap(df.corr(), cmap="RdBu_r", annot=True, fmt = '.2f', ax=ax); ``` **Price** положительно коррелирует с **yearOfRegistration** и **powerPS** и отрицательно - с **kilometer**. Посмотрим на некоторые категориальные признаки по отдельности. ``` feats = ['seller', 'vehicleType', 'gearbox', 'fuelType'] fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14,12)) idx = 0 for i in range(2): for j in range(2): sns.countplot(df[feats[idx]], ax=axes[i][j]); idx += 1 ``` Оказывается у нас только одна запись с **seller** == 'gewerblich', так что можно удалить этот признак. fuelType отличный от diesel и benzin объединим в общую группу andere. 
``` df.drop('seller', axis=1, inplace=True) df['fuelType'].replace(['lpg', 'hybrid', 'cng', 'elektro'], 'andere', inplace=True) sns.countplot(df['fuelType']); ``` Посмотрим на взаимодействие категориальных признаков и цены. ``` fig, axis = plt.subplots(nrows=1, ncols=2, figsize=(14, 6)) sns.boxplot(x='fuelType', y='price', data=df, ax=axis[0]); sns.boxplot(x='gearbox', y='price', data=df, ax=axis[1]); ``` Автомобили с дизельными двигателями дороже бензиновых и прочих, а машины с автоматической коробкой дороже, чем с ручной. ``` fig, axis = plt.subplots(figsize=(16, 8), ) sns.boxplot(x='brand', y='price', data=df); axis.set_xticklabels(df['brand'].unique(), rotation=80); ``` Из этой картинки следует, что Porsche существенно дороже остальных марок. Признак **postalCode** мы исключим. Если бы рассматривался российский или, к примеру, американский рынок, то имело бы смысл поработать с ним, так как цены от области к области (от штата к штату) варьируются. На Камчатке и Аляске они, вероятно, выше, чем по стране. Но поскольку речь идет о Германии, то отбросим этот признак. ``` df.drop(['postalCode'], axis=1, inplace=True) ``` ### Предобработка данных ``` y = np.log1p(df['price']) X = df.drop(['price'], axis=1) ``` Разделим данные на тренировочную и тестовую части и применим dummy-кодирование к категориальным признакам. ``` X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.3, shuffle=True, stratify=X['brand'], random_state=RANDOM_SEED) split_idx = X_train.shape[0] for cat_feature in X.columns[X.dtypes == 'object']: X_train[cat_feature] = X_train[cat_feature].astype('category') X_train[cat_feature].cat.set_categories(X_train[cat_feature].unique(), inplace=True) X_test[cat_feature] = X_test[cat_feature].astype('category') X_test[cat_feature].cat.set_categories(X_train[cat_feature].unique(), inplace=True) # В тестовом наборе оказались модели, которых не было в тренировочном, появившиеся NaN замняем "andere" ("другое"). X_test['model'].fillna(value='andere', inplace=True) X_train = pd.get_dummies(X_train, columns=X_train.columns[X_train.dtypes == 'category']) X_test = pd.get_dummies(X_test, columns=X_test.columns[X_test.dtypes == 'category']) X_train.shape, X_test.shape ``` ### Построение базовых моделей. Выбор метрик качества. Давайте построим и сравним линейную модель и случайный лес. ``` # Удаляем даты X_train_base = X_train.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1) X_test_base = X_test.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1) ``` #### Базовая модель линейной регрессии Отмасштабируем признаки ``` scaler = StandardScaler().fit(X_train_base) X_train_scaled = scaler.transform(X_train_base) X_test_scaled = scaler.transform(X_test_base) %%time lr = LinearRegression() lr.fit(X_train_scaled, y_train) ``` В качестве метрик качества линейной регрессии выберем MAE за его интерпретируемость. Также посмотрим на коэффициент детерминации, или коэффициент $R^2$. Данная мера качества — это нормированная среднеквадратичная ошибка. Чем она ближе к единице, тем лучше модель объясняет данные. 
``` y_preds_lr = lr.predict(X_test_scaled) print('LinearRegression:') print('\tMAE: ', mean_absolute_error(y_test, y_preds_lr)) print('\tR2: ', r2_score(y_test, y_preds_lr)) ``` #### Базовая модель случайного леса ``` %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_base, y_train) y_preds = rf.predict(X_test_base) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) print('Target range: (%.2f, %.2f) ' % (y.min(), y.max())) ``` Учитывая, что целевая переменная принимает значения в диапазоне (4.62, 11.51), то ошибка выглядит допустимой. ### Создание новых признаков и описание этого процесса Новые признаки, которые предположительно могут коррелировать с целевым: - **adUpDays** - сколько дней висело объявление - **kilPerYear** - среднегодовой пробег. ``` new_feats_train = pd.DataFrame(index=X_train.index) new_feats_test = pd.DataFrame(index=X_test.index) new_feats_train['adUpDays'] = (X_train['lastSeen'] - X_train['dateCrawled']).dt.days + 1 new_feats_test['adUpDays'] = (X_test['lastSeen'] - X_test['dateCrawled']).dt.days + 1 new_feats_train['age'] = X_train['dateCrawled'].apply(lambda x: x.year) - X_train['yearOfRegistration'] new_feats_test['age'] = X_test['dateCrawled'].apply(lambda x: x.year) - X_test['yearOfRegistration'] new_feats_train['kilPerYear'] = X_train['kilometer'] / new_feats_train['age'] new_feats_test['kilPerYear'] = X_test['kilometer'] / new_feats_test['age'] X_train.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1, inplace=True) X_test.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1, inplace=True) new_feats_train['adUpDays'].hist(); new_feats_train['kilPerYear'].hist(); ``` Применим к признакам со смещенным распределением логарифмирование. ``` new_feats_train['kilPerYear_log'] = np.log1p(new_feats_train['kilPerYear']) new_feats_test['kilPerYear_log'] = np.log1p(new_feats_test['kilPerYear']) new_feats_train['powerPS_log'] = np.log1p(X_train['powerPS']) new_feats_test['powerPS_log'] = np.log1p(X_test['powerPS']) ``` Добавление новых признаков и полиномов второй степени улучшило качество линейной модели, но оно всё же хуже, чем у базового случайного леса, поэтому сосредоточимся на последнем. ``` scaler = StandardScaler().fit(X_train[['kilometer', 'yearOfRegistration']]) features_scaled_train = scaler.transform(X_train[['kilometer', 'yearOfRegistration']]) features_scaled_test = scaler.transform(X_test[['kilometer', 'yearOfRegistration']]) poly = PolynomialFeatures(2) X_train_poly = poly.fit_transform(np.concatenate([new_feats_train[['powerPS_log', 'kilPerYear_log', 'adUpDays']], features_scaled_train], axis=1)) X_test_poly = poly.transform(np.concatenate([new_feats_test[['powerPS_log', 'kilPerYear_log', 'adUpDays']], features_scaled_test], axis=1)) X_train_new = np.concatenate([X_train_poly, X_train.drop(['powerPS', 'kilometer', \ 'yearOfRegistration'], axis=1)], axis=1) X_test_new = np.concatenate([X_test_poly, X_test.drop(['powerPS', 'kilometer', \ 'yearOfRegistration'], axis=1)], axis=1) %%time lr = LinearRegression() lr.fit(X_train_new, y_train) y_preds_lr = lr.predict(X_test_new) print('LinearRegression:') print('\tMAE: ', mean_absolute_error(y_test, y_preds_lr)) print('\tR2: ', r2_score(y_test, y_preds_lr)) ``` Модель случайного леса немного улучшилась при добавлении **adUpDays**. Добавление **kilPerYear** никак не повлияло на качество. 
``` X_train_new = pd.concat([X_train, new_feats_train[['kilPerYear']]], axis=1) X_test_new = pd.concat([X_test, new_feats_test[['kilPerYear']]], axis=1) %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_new, y_train) y_preds = rf.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) X_train_new = pd.concat([X_train, new_feats_train[['adUpDays']]], axis=1) X_test_new = pd.concat([X_test, new_feats_test[['adUpDays']]], axis=1) %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_new, y_train) y_preds = rf.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) ``` Какие же признаки оказались наиболее важными для модели случайного леса. Как видно из графика ниже, наиболее важным признаком оказался год регистрации, второй по важности признак - мощность двигателя, третий - **notRepairedDamage**. ``` features = X_train_new.columns importances = rf.feature_importances_ num_to_plot = 10 indices = np.argsort(importances)[::-1][:num_to_plot] plt.figure(figsize=(14,6)) plt.title('Feature impotances for a RandomForestRegressor model', size=15) bars = plt.bar(range(num_to_plot), importances[indices], align="center") ticks = plt.xticks(range(num_to_plot), features.values[indices[:]], rotation=70, size=13) ``` ### Построение кривых валидации Попробуем улучшить результат случайного леса. Для начала посмотрим как ведут себя кривые валидации при изменении основных параметров. Начнем с количества деревьев: ``` def valid_curves_plot(hyperparam, param_range): param_range = param_range train_scores, test_scores = validation_curve(RandomForestRegressor(random_state=RANDOM_SEED), X_train_new, y_train, param_name=hyperparam, param_range=param_range, cv=kf, n_jobs=-1, scoring='r2') train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.figure(figsize=(9,6)) plt.title('Validation curves for a RandomForestRegressor model') plt.xlabel(hyperparam) plt.ylabel('R2') plt.ylim(0.8, 1.0) plt.fill_between(param_range, train_scores_mean - train_scores_std, \ train_scores_mean + train_scores_std, alpha=0.2, color="r") plt.plot(param_range, train_scores_mean, label='Training error', color="r") plt.fill_between(param_range, test_scores_mean - test_scores_std, \ test_scores_mean + test_scores_std, alpha=0.2, color="g") plt.plot(param_range, test_scores_mean, label='Validation error', color="g") plt.legend(loc="best") plt.xticks(param_range) plt.show() %%time valid_curves_plot(hyperparam='n_estimators', param_range=[5, 10, 15, 20, 30, 50, 75, 100]) ``` Как видно при достижении 30 деревьев точность модели на тесте выходит на асимптоту. Давайте посмотрим какие параметры регуляризации добавить в модель, чтобы недопустить переобучение. Посмотрим как ведет себя модель в зависимости от параметра максимальной глубины – `max_depth`. ``` %%time valid_curves_plot(hyperparam='max_depth', param_range=[3, 5, 7, 9, 11, 13, 15, 17, 20, 22, 24]) ``` Как видим, строить деревья глубиной более 22 смысла не имеет, качество на тесте выходит на ассиптоту.<br> <br> Построим кривые валидации для параметра `min_samples_leaf`. 
``` %%time valid_curves_plot(hyperparam='min_samples_leaf', param_range=[1, 3, 5, 7, 9, 11, 13]) ``` Как мы видим на тесте максимальное качество достигается, если минимальном числе объектов в листе 3. <br> <br> Параметр `max_features` определяет количество случайных признаков из `n` исходных. Для задач регрессии рекомендуется использовать $\frac{n}{3}$. Давайте определим оптимальный параметр для нашего случая. ``` %%time valid_curves_plot(hyperparam='max_features', param_range=[50, 100, 200, 300, 400, 500, 600, 700]) ``` **max_features** = 200 - оптимальный вариант. ### Кросс-валидация, подбор параметров ``` # Сделаем инициализацию параметров, по которым хотим сделать полный перебор parameters = {'max_features': [100,200, 300, 400, 500], 'min_samples_leaf': [1, 3, 5, 7], 'max_depth': [13, 15, 17, 20, 22, 24]} rfc = RandomForestRegressor(n_estimators=30, random_state=RANDOM_SEED, n_jobs=-1) gcv = GridSearchCV(rfc, parameters, n_jobs=-1, cv=kf) gcv.fit(X_train_new, y_train) gcv.best_estimator_, gcv.best_score_ ``` ### Прогноз для тестовой выборки Обучим случайный лес с оптимальными гиперпараметрами и получим прогноз для тестовой выборки. ``` %%time gcv.best_estimator_.fit(X_train_new, y_train) y_preds = gcv.best_estimator_.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) ``` Нам удалось выиграть "аж" 0.01 на обоих метриках. ### Оценка модели Переведем целевой признак обратно в километры и посмотрим насколько хорошо наша модель отработала на тестовом наборе. Для начала сравним реальные и предсказанные цены по нескольким примерам. ``` y_test_preds = pd.DataFrame({'y_true': np.exp(y_test) - 1, 'y_preds': np.exp(y_preds) - 1}) y_test_preds['y_true'] =round(y_test_preds['y_true'], 2) y_test_preds['y_preds'] =round(y_test_preds['y_preds'], 2) y_test_preds['diff'] = np.absolute(y_test_preds['y_true'] - y_test_preds['y_preds']) y_test_preds.head(20).T ``` Как видим модель довольно сильно ошибается в некоторых случаях, но в целом закономерности в данных выявлены и результат выглядит неплохо. На графике ниже видим рассеяние реальной цены vs. предсказанной цены относительно линии идентичности (красная линия). Хорошо видно, что чем больше цена, тем сильнее ошибается модель и видно тенденцию к недооценке. Очевидно это связано с недостаточностью данных по дорогим автомобилям. ``` from ggplot import * ggplot(y_test_preds, aes(x='y_true',y='y_preds')) + \ geom_point(color='blue') + \ xlim(-1e03, 1e+05) + ylim(-1e03, 1e+05) + \ ggtitle("RandomForestRegressor model") + \ xlab("True price") + ylab("Predicted price") + \ geom_abline(intercept=0, size=2, color='red') ``` Также видим в самом начале какие-то сильные выбросы. Посмотрев на данные, становится ясно, что модель предсказывает более высокую стоимость машин, выставленных за "бесценок". Возможно продавцы указывают низкую цену, чтобы привлечь покупателей, а может забыли указать, что машина требует ремонта. ``` y_test_preds.sort_values('y_true').head() ``` ### Построение кривых обучения Давайте построим кривые обучения для всего объема данных. 
``` %%time plt.figure(figsize=(12, 8)) plt.title("Learning curves for a RandomForestRegressor model") plt.xlabel("Training examples") plt.ylabel("MSE") train_sizes, train_scores, test_scores = \ learning_curve(RandomForestRegressor(max_depth=24, max_features=200, min_samples_leaf=1, n_estimators=30, n_jobs=-1, random_state=RANDOM_SEED), np.concatenate([X_train, X_test]), np.concatenate([y_train, y_test]), cv=kf, train_sizes=np.linspace(0.1, 1.0, 5), random_state=RANDOM_SEED) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training error") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Validation error") plt.legend() ``` Большое расстояние между кривыми указывает на переобучение. ### Выводы Сырые данные содержали много пропусков и выбросы. Нам потребовалось провести значительную обработку и фильтрацию. К категориальным признакам мы применили one-hot encoding. Целевой признак имел сильно скошенное распределение, поэтому мы применили к нему логарифмическое преобразование. Мы сравнили две модели и пришли к выводу, что линейная модель с полиномиальными признаками дает MAE: 0.31 и $R^2$: 0.85, в то время как случайный лес "из коробки" сразу выдал MAE: 0.28 и $R^2$: 0.87. К сожалению, нам не удалось синтезировать признаки, улучшающие этот результат случайного леса. Настройка гиперпараметров привела к незначительному росту качества: MAE: 0.27 и $R^2$: 0.88. Учитывая, что случайный лес показал неплохие результаты, с моей стороны было бы упущением не попровать бустинг на наших данных. Я воспользовалась питоновской реализацией XGBoost, понастраивала параметры с помощью hyperopt, но значимых улучшений не получила, поэтому решила не докучать читателям и опустила выкладки. Таким образом можно заключить, что применительно к этой задаче случайный лес сработал хорошо. Исходя из кривых обучения можно сделать вывод, чтобы улучшить модель можно: - увеличить размер выборки; - понизить сложность модели. Этого можно добиться, если использовать меньший набор признаков или увеличить регуляризацию (к примеру, можно снизить глубину деревьев или увеличить минимальном число объектов в листе).
github_jupyter
import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt %matplotlib inline from sklearn.linear_model import LinearRegression, Ridge, Lasso from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import cross_val_score, train_test_split from sklearn.model_selection import GridSearchCV, learning_curve, validation_curve, KFold from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score from xgboost import XGBRegressor from scipy.sparse import csr_matrix, hstack import warnings warnings.filterwarnings('ignore') RANDOM_SEED = 17 kf = KFold(n_splits=5, shuffle=True, random_state=RANDOM_SEED) parse_dates = ['dateCrawled', 'dateCreated', 'lastSeen'] df = pd.read_csv('data/autos.csv', sep=',', parse_dates=parse_dates, encoding = 'ISO-8859-1') df.head() df.shape df.info() df.drop([ 'abtest'], axis=1, inplace=True) df = df[df['offerType'] != 'Gesuch'].drop('offerType', axis=1) df['vehicleType'].fillna(axis=0, value='andere', inplace=True) df.dropna(axis=0, subset=['gearbox'], inplace=True) df['brand'].unique() df = df[df['brand'] != "sonstige_autos"] model_nan_idx = df[pd.isnull(df["model"])].index def model_extractor(x): x = x.apply(str.lower) name = x['name'].split(sep='_') try: if name[0] == x["brand"]: return name[1] else: return np.nan except: return np.nan df.loc[model_nan_idx, 'model'] = df.loc[model_nan_idx][['name', 'brand']].apply(model_extractor, axis=1) df.dropna(axis=0, subset=['model'], inplace=True) df.drop('name', axis=1, inplace=True) df['fuelType'].value_counts(dropna=False) df['fuelType'].fillna(axis=0, value='andere', inplace=True) df['notRepairedDamage'].fillna(value='nein', inplace=True) df['notRepairedDamage'] = df['notRepairedDamage'].map({'ja': 1, 'nein': 0}).astype('int64') df.describe(include='all').T df.drop('nrOfPictures', axis=1, inplace=True) feats = ['price', 'yearOfRegistration', 'powerPS'] fig, axes = plt.subplots(ncols=len(feats), nrows=1, figsize=(18,6)) for i, feat in enumerate(feats): sns.boxplot(df[feat], ax=axes[i], orient='v', width=0.5, color='g'); axes[i].set_ylabel('') axes[i].set_title(feat) df = df[(df['price'] >= 100) & (df['price'] <= 150000)] df = df[(df['yearOfRegistration'] >= 1976) & (df['yearOfRegistration'] < 2016)] df = df[(df['powerPS'] > 20) & (df['powerPS'] < 600)] from matplotlib.ticker import FuncFormatter feats = ['price', 'yearOfRegistration', 'powerPS'] fig, axis = plt.subplots(ncols=3, figsize=(18, 6)) for i, feat in enumerate(feats): sns.boxplot(np.log(df[feat]), ax=axis[i], orient='v', width=0.5, color='g'); y_formatter = FuncFormatter(lambda x, pos: ('%i')%(np.exp(x))) axis[i].yaxis.set_major_formatter(y_formatter) df['monthOfRegistration'] = df['monthOfRegistration'].astype('object') plt.figure(figsize=(10, 5)) sns.countplot(df['monthOfRegistration']); df.hist(figsize=(15, 10)); print('Доля машин с пробегом 150000 км: ', df[df['kilometer'] == 150000].shape[0] / df.shape[0]) fig, axes = plt.subplots(ncols=3, figsize=(15,5)) (df['kilometer'] ** 2).hist(ax=axes[0]); np.log1p(df['price']).hist(ax=axes[1]); np.log1p(df['powerPS']).hist(ax=axes[2]); pal = sns.light_palette("green", as_cmap=True) fig, ax = plt.subplots(figsize=(12,8)) sns.heatmap(df.corr(), cmap="RdBu_r", annot=True, fmt = '.2f', ax=ax); feats = ['seller', 'vehicleType', 'gearbox', 'fuelType'] fig, axes = plt.subplots(nrows=2, ncols=2, 
figsize=(14,12)) idx = 0 for i in range(2): for j in range(2): sns.countplot(df[feats[idx]], ax=axes[i][j]); idx += 1 df.drop('seller', axis=1, inplace=True) df['fuelType'].replace(['lpg', 'hybrid', 'cng', 'elektro'], 'andere', inplace=True) sns.countplot(df['fuelType']); fig, axis = plt.subplots(nrows=1, ncols=2, figsize=(14, 6)) sns.boxplot(x='fuelType', y='price', data=df, ax=axis[0]); sns.boxplot(x='gearbox', y='price', data=df, ax=axis[1]); fig, axis = plt.subplots(figsize=(16, 8), ) sns.boxplot(x='brand', y='price', data=df); axis.set_xticklabels(df['brand'].unique(), rotation=80); df.drop(['postalCode'], axis=1, inplace=True) y = np.log1p(df['price']) X = df.drop(['price'], axis=1) X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.3, shuffle=True, stratify=X['brand'], random_state=RANDOM_SEED) split_idx = X_train.shape[0] for cat_feature in X.columns[X.dtypes == 'object']: X_train[cat_feature] = X_train[cat_feature].astype('category') X_train[cat_feature].cat.set_categories(X_train[cat_feature].unique(), inplace=True) X_test[cat_feature] = X_test[cat_feature].astype('category') X_test[cat_feature].cat.set_categories(X_train[cat_feature].unique(), inplace=True) # В тестовом наборе оказались модели, которых не было в тренировочном, появившиеся NaN замняем "andere" ("другое"). X_test['model'].fillna(value='andere', inplace=True) X_train = pd.get_dummies(X_train, columns=X_train.columns[X_train.dtypes == 'category']) X_test = pd.get_dummies(X_test, columns=X_test.columns[X_test.dtypes == 'category']) X_train.shape, X_test.shape # Удаляем даты X_train_base = X_train.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1) X_test_base = X_test.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1) scaler = StandardScaler().fit(X_train_base) X_train_scaled = scaler.transform(X_train_base) X_test_scaled = scaler.transform(X_test_base) %%time lr = LinearRegression() lr.fit(X_train_scaled, y_train) y_preds_lr = lr.predict(X_test_scaled) print('LinearRegression:') print('\tMAE: ', mean_absolute_error(y_test, y_preds_lr)) print('\tR2: ', r2_score(y_test, y_preds_lr)) %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_base, y_train) y_preds = rf.predict(X_test_base) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) print('Target range: (%.2f, %.2f) ' % (y.min(), y.max())) new_feats_train = pd.DataFrame(index=X_train.index) new_feats_test = pd.DataFrame(index=X_test.index) new_feats_train['adUpDays'] = (X_train['lastSeen'] - X_train['dateCrawled']).dt.days + 1 new_feats_test['adUpDays'] = (X_test['lastSeen'] - X_test['dateCrawled']).dt.days + 1 new_feats_train['age'] = X_train['dateCrawled'].apply(lambda x: x.year) - X_train['yearOfRegistration'] new_feats_test['age'] = X_test['dateCrawled'].apply(lambda x: x.year) - X_test['yearOfRegistration'] new_feats_train['kilPerYear'] = X_train['kilometer'] / new_feats_train['age'] new_feats_test['kilPerYear'] = X_test['kilometer'] / new_feats_test['age'] X_train.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1, inplace=True) X_test.drop(['dateCrawled','dateCreated', 'lastSeen'], axis=1, inplace=True) new_feats_train['adUpDays'].hist(); new_feats_train['kilPerYear'].hist(); new_feats_train['kilPerYear_log'] = np.log1p(new_feats_train['kilPerYear']) new_feats_test['kilPerYear_log'] = np.log1p(new_feats_test['kilPerYear']) new_feats_train['powerPS_log'] = np.log1p(X_train['powerPS']) new_feats_test['powerPS_log'] = 
np.log1p(X_test['powerPS']) scaler = StandardScaler().fit(X_train[['kilometer', 'yearOfRegistration']]) features_scaled_train = scaler.transform(X_train[['kilometer', 'yearOfRegistration']]) features_scaled_test = scaler.transform(X_test[['kilometer', 'yearOfRegistration']]) poly = PolynomialFeatures(2) X_train_poly = poly.fit_transform(np.concatenate([new_feats_train[['powerPS_log', 'kilPerYear_log', 'adUpDays']], features_scaled_train], axis=1)) X_test_poly = poly.transform(np.concatenate([new_feats_test[['powerPS_log', 'kilPerYear_log', 'adUpDays']], features_scaled_test], axis=1)) X_train_new = np.concatenate([X_train_poly, X_train.drop(['powerPS', 'kilometer', \ 'yearOfRegistration'], axis=1)], axis=1) X_test_new = np.concatenate([X_test_poly, X_test.drop(['powerPS', 'kilometer', \ 'yearOfRegistration'], axis=1)], axis=1) %%time lr = LinearRegression() lr.fit(X_train_new, y_train) y_preds_lr = lr.predict(X_test_new) print('LinearRegression:') print('\tMAE: ', mean_absolute_error(y_test, y_preds_lr)) print('\tR2: ', r2_score(y_test, y_preds_lr)) X_train_new = pd.concat([X_train, new_feats_train[['kilPerYear']]], axis=1) X_test_new = pd.concat([X_test, new_feats_test[['kilPerYear']]], axis=1) %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_new, y_train) y_preds = rf.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) X_train_new = pd.concat([X_train, new_feats_train[['adUpDays']]], axis=1) X_test_new = pd.concat([X_test, new_feats_test[['adUpDays']]], axis=1) %%time rf = RandomForestRegressor(random_state=RANDOM_SEED) rf.fit(X_train_new, y_train) y_preds = rf.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) features = X_train_new.columns importances = rf.feature_importances_ num_to_plot = 10 indices = np.argsort(importances)[::-1][:num_to_plot] plt.figure(figsize=(14,6)) plt.title('Feature impotances for a RandomForestRegressor model', size=15) bars = plt.bar(range(num_to_plot), importances[indices], align="center") ticks = plt.xticks(range(num_to_plot), features.values[indices[:]], rotation=70, size=13) def valid_curves_plot(hyperparam, param_range): param_range = param_range train_scores, test_scores = validation_curve(RandomForestRegressor(random_state=RANDOM_SEED), X_train_new, y_train, param_name=hyperparam, param_range=param_range, cv=kf, n_jobs=-1, scoring='r2') train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.figure(figsize=(9,6)) plt.title('Validation curves for a RandomForestRegressor model') plt.xlabel(hyperparam) plt.ylabel('R2') plt.ylim(0.8, 1.0) plt.fill_between(param_range, train_scores_mean - train_scores_std, \ train_scores_mean + train_scores_std, alpha=0.2, color="r") plt.plot(param_range, train_scores_mean, label='Training error', color="r") plt.fill_between(param_range, test_scores_mean - test_scores_std, \ test_scores_mean + test_scores_std, alpha=0.2, color="g") plt.plot(param_range, test_scores_mean, label='Validation error', color="g") plt.legend(loc="best") plt.xticks(param_range) plt.show() %%time valid_curves_plot(hyperparam='n_estimators', param_range=[5, 10, 15, 20, 30, 50, 75, 100]) %%time valid_curves_plot(hyperparam='max_depth', param_range=[3, 5, 7, 9, 11, 13, 15, 17, 20, 22, 24]) 
%%time valid_curves_plot(hyperparam='min_samples_leaf', param_range=[1, 3, 5, 7, 9, 11, 13]) %%time valid_curves_plot(hyperparam='max_features', param_range=[50, 100, 200, 300, 400, 500, 600, 700]) # Сделаем инициализацию параметров, по которым хотим сделать полный перебор parameters = {'max_features': [100,200, 300, 400, 500], 'min_samples_leaf': [1, 3, 5, 7], 'max_depth': [13, 15, 17, 20, 22, 24]} rfc = RandomForestRegressor(n_estimators=30, random_state=RANDOM_SEED, n_jobs=-1) gcv = GridSearchCV(rfc, parameters, n_jobs=-1, cv=kf) gcv.fit(X_train_new, y_train) gcv.best_estimator_, gcv.best_score_ %%time gcv.best_estimator_.fit(X_train_new, y_train) y_preds = gcv.best_estimator_.predict(X_test_new) print('RandomForestRegressor:') print('\tMAE: ', mean_absolute_error(y_test, y_preds)) print('\tR2: ', r2_score(y_test, y_preds)) y_test_preds = pd.DataFrame({'y_true': np.exp(y_test) - 1, 'y_preds': np.exp(y_preds) - 1}) y_test_preds['y_true'] =round(y_test_preds['y_true'], 2) y_test_preds['y_preds'] =round(y_test_preds['y_preds'], 2) y_test_preds['diff'] = np.absolute(y_test_preds['y_true'] - y_test_preds['y_preds']) y_test_preds.head(20).T from ggplot import * ggplot(y_test_preds, aes(x='y_true',y='y_preds')) + \ geom_point(color='blue') + \ xlim(-1e03, 1e+05) + ylim(-1e03, 1e+05) + \ ggtitle("RandomForestRegressor model") + \ xlab("True price") + ylab("Predicted price") + \ geom_abline(intercept=0, size=2, color='red') y_test_preds.sort_values('y_true').head() %%time plt.figure(figsize=(12, 8)) plt.title("Learning curves for a RandomForestRegressor model") plt.xlabel("Training examples") plt.ylabel("MSE") train_sizes, train_scores, test_scores = \ learning_curve(RandomForestRegressor(max_depth=24, max_features=200, min_samples_leaf=1, n_estimators=30, n_jobs=-1, random_state=RANDOM_SEED), np.concatenate([X_train, X_test]), np.concatenate([y_train, y_test]), cv=kf, train_sizes=np.linspace(0.1, 1.0, 5), random_state=RANDOM_SEED) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training error") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Validation error") plt.legend()
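# --- Illustrative addition (an assumption, not part of the original notebook) ---
# The target above is log1p(price), so the MAE/R2 printed for the tuned forest are on
# the log scale. A quick sketch of the same errors expressed back in price units,
# reusing the existing y_test and y_preds from the tuned RandomForestRegressor:
mae_price = mean_absolute_error(np.expm1(y_test), np.expm1(y_preds))
rmse_price = np.sqrt(mean_squared_error(np.expm1(y_test), np.expm1(y_preds)))
print('MAE in original price units: %.2f' % mae_price)
print('RMSE in original price units: %.2f' % rmse_price)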
0.381911
0.937211
# Extending ITK with your own module ## Why? * New algorithm * Easy integration in ITK * Reproducible science * Easy Python wrapping ## Requirements * CMake * C++ development environment ## Why use C++ * C++ allows the developer to have full access to ITK * Memory management * Thread management * Any dimension and pixel type supported. * Key element missing in Python: Iterators. ## Iterators * An iterator is a fast and safe way to access an entire range of memory. * It can be used to process an entire image. ## ITK Modules * ITK is organized in modules * Each module typically contains filters that are related (e.g. Segmentation, Registration, Filtering) ## What are remote modules * Remote modules are an easy way to develop new ITK filters and integrate them in ITK. * Allows the developer to easily publish and share their new algorithms. * ITK continuously contains the best available algorithms. * Remote modules are separate project from ITK. The developer(s) is (are) responsible of maintaining the project. * Remote modules integrate one or more filters implemented in C++ and their corresponding Python bindings. ## Examples * ITKTextureFeatures * ITKUltrasound * ITKIsotropicWavelets ## ITKTextureFeatures ### Two filters: * itkCoocurrenceTextureFeaturesImageFilter: computes textural features based on intensity-based co-occurrence matrices in the image. * itkRunLengthTextureFeaturesImageFilter: computes textural features based on equally valued intensity clusters of different sizes or run lengths in the image. <div style="width: 100%; display: table;"> <div style="display: table-row"> <div style="width: 350px; display: table-cell;"> <figure> <img src="data/CBCT-TextureInput.png"> <figcaption>Fig1. - CBCT of the test condyle</figcaption> </figure> </div> <div style="width: 350px; display: table-cell;"> <figure> <img src="data/CBCT-TextureRunLengthNonUniformity.png"> <figcaption>Fig2. - texture’s run length non uniformity</figcaption> </figure> </div> </div> </div> http://www.insight-journal.org/browse/publication/985 ## ITKUltrasound * High performance ultrasound B-Mode implementation based on the Insight Toolkit (ITK). * Introduction of an itk::StreamingResampleImageFilter. <figure> <img src="data/Ultrasound-B-mode.png"> <figcaption>Fig1. Large, scan converted B-Mode image of anechoic spheres in a tissue mimicking ultrasound phantom. The image was processed without swapping to disk.</figcaption> </figure> http://www.insight-journal.org/browse/publication/722 ## ITKIsotropicWavelets * Multiresolution (MRA) analysis framework using isotropic and steerable wavelets in the frequency domain. * Provides the backbone for state of the art filters for denoising, feature detection or phase analysis in N-dimension http://www.insight-journal.org/browse/publication/986 ## Creation of a remote module: Overview 1. The developer creates a new module containing new ITK filters. * The new module is its own independent GitHub project. * The new module can be easily be compiled and used in combination with ITK. 2. The developer writes an Insight Journal article * The module is more visible to the community. * An option can be added to ITK to compile the remote module as part of ITK. ## Creation of a remote module: details * The template project source code is here: https://github.com/InsightSoftwareConsortium/ITKModuleTemplate * Run the following commands: python -m pip install cookiecutter python -m cookiecutter gh:InsightSoftwareConsortium/ITKModuleTemplate * Provide requested information. 
Answer the following questions (Pressing "Enter" will use the default option): full_name [Insight Software Consortium]: email [itk+community@discourse.itk.org]: github_username [itkrobot]: project_name [ITKModuleTemplate]: module_name [ModuleTemplate]: python_package_name [itk-moduletemplate]: download_url [https://github.com/InsightSoftwareConsortium/ITKModuleTemplate]: project_short_description [This is a template that serves as a starting point for a new module.]: project_long_description [ITK is an open-source, cross-platform library that provides developers with an extensive suite of software tools for image analysis. Developed through extreme programming methodologies, ITK employs leading-edge algorithms for registering and segmenting multidimensional scientific images.]: ## New Module Content <pre> (itk) fbudin:ITKModuleTemplate/ $ tree -a . ├── appveyor.yml ├── .circleci │   └── config.yml ├── CMakeLists.txt ├── CTestConfig.cmake ├── include │   ├── itkMinimalStandardRandomVariateGenerator.h │   ├── itkMyFilter.h │   ├── itkMyFilter.hxx │   ├── itkNormalDistributionImageSource.h │   └── itkNormalDistributionImageSource.hxx ├── itk-module.cmake ├── LICENSE ├── README.rst ├── setup.py </pre> <pre> ├── src │ ├── CMakeLists.txt │ └── itkMinimalStandardRandomVariateGenerator.cxx ├── test │ ├── Baseline │ │ ├── itkMyFilterTestOutput.mha.sha512 │ │ └── itkNormalDistributionImageSourceTestOutput.mha.sha512 │ ├── CMakeLists.txt │ ├── itkMinimalStandardRandomVariateGeneratorTest.cxx │ ├── itkMyFilterTest.cxx │ └── itkNormalDistributionImageSourceTest.cxx ├── .travis.yml └── wrapping ├── CMakeLists.txt ├── itkMinimalStandardRandomVariateGenerator.wrap └── itkNormalDistributionImageSource.wrap </pre> ## Directory structure * `src` and `include`: header files and source code * `test`: unit tests * `wrapping`: Required files to automatically create Python bindings. ## Filter code <pre> template< typename TInputImage, typename TOutputImage > void MyFilter< TInputImage, TOutputImage > ::DynamicThreadedGenerateData( const OutputRegionType & outputRegion) { OutputImageType * output = this->GetOutput(); const InputImageType * input = this->GetInput(); using InputRegionType = typename InputImageType::RegionType; InputRegionType inputRegion = InputRegionType(outputRegion.GetSize()); itk::ImageRegionConstIterator<InputImageType> in(input, inputRegion); itk::ImageRegionIterator<OutputImageType> out(output, outputRegion); for (in.GoToBegin(), out.GoToBegin(); !in.IsAtEnd() && !out.IsAtEnd(); ++in, ++out) { out.Set( in.Get() ); } } </pre> ## Continuous integration * Appveyor (Windows) * Travis (MacOS) * CircleCI (Linux) ## Python packages * Automatically generated in the continuous integration platforms. * Download wheel to personal computer * Upload wheel to PyPI (`pip install`). 
## Where to find more information: * ITK Software Guide * [Configuring and building ITK](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch2.html#x22-130002) * [Create a remote module](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch9.html#x55-1640009.7) * [How to write a filter](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch8.html#x54-1330008) * [Iterators](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch6.html#x44-1020006) * [Modules](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch9.html#x48-1480009) * [Discourse forum](https://discourse.itk.org/) ## Exercises ### Exercise 1: Create the skeleton of a remote module * Hint1: You can run command lines by prefixing them with the symbol '!' * Hint2: You will need to add the argument '--no-input' to the command you are using. This is a limitation due to this notebook environment. ### Exercise 2: Modify the filter * Add a constant value * Multiply by a constant factor ### Enjoy ITK!
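One way to start Exercise 1, using only the commands and hints already given above (the `--no-input` flag accepts the default answers listed earlier):

```
# Run inside a notebook cell; the leading '!' sends the command to the shell.
!python -m pip install cookiecutter
!python -m cookiecutter gh:InsightSoftwareConsortium/ITKModuleTemplate --no-input
```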
github_jupyter
# Extending ITK with your own module ## Why? * New algorithm * Easy integration in ITK * Reproducible science * Easy Python wrapping ## Requirements * CMake * C++ development environment ## Why use C++ * C++ allows the developer to have full access to ITK * Memory management * Thread management * Any dimension and pixel type supported. * Key element missing in Python: Iterators. ## Iterators * An iterator is a fast and safe way to access an entire range of memory. * It can be used to process an entire image. ## ITK Modules * ITK is organized in modules * Each module typically contains filters that are related (e.g. Segmentation, Registration, Filtering) ## What are remote modules * Remote modules are an easy way to develop new ITK filters and integrate them in ITK. * Allows the developer to easily publish and share their new algorithms. * ITK continuously contains the best available algorithms. * Remote modules are separate project from ITK. The developer(s) is (are) responsible of maintaining the project. * Remote modules integrate one or more filters implemented in C++ and their corresponding Python bindings. ## Examples * ITKTextureFeatures * ITKUltrasound * ITKIsotropicWavelets ## ITKTextureFeatures ### Two filters: * itkCoocurrenceTextureFeaturesImageFilter: computes textural features based on intensity-based co-occurrence matrices in the image. * itkRunLengthTextureFeaturesImageFilter: computes textural features based on equally valued intensity clusters of different sizes or run lengths in the image. <div style="width: 100%; display: table;"> <div style="display: table-row"> <div style="width: 350px; display: table-cell;"> <figure> <img src="data/CBCT-TextureInput.png"> <figcaption>Fig1. - CBCT of the test condyle</figcaption> </figure> </div> <div style="width: 350px; display: table-cell;"> <figure> <img src="data/CBCT-TextureRunLengthNonUniformity.png"> <figcaption>Fig2. - texture’s run length non uniformity</figcaption> </figure> </div> </div> </div> http://www.insight-journal.org/browse/publication/985 ## ITKUltrasound * High performance ultrasound B-Mode implementation based on the Insight Toolkit (ITK). * Introduction of an itk::StreamingResampleImageFilter. <figure> <img src="data/Ultrasound-B-mode.png"> <figcaption>Fig1. Large, scan converted B-Mode image of anechoic spheres in a tissue mimicking ultrasound phantom. The image was processed without swapping to disk.</figcaption> </figure> http://www.insight-journal.org/browse/publication/722 ## ITKIsotropicWavelets * Multiresolution (MRA) analysis framework using isotropic and steerable wavelets in the frequency domain. * Provides the backbone for state of the art filters for denoising, feature detection or phase analysis in N-dimension http://www.insight-journal.org/browse/publication/986 ## Creation of a remote module: Overview 1. The developer creates a new module containing new ITK filters. * The new module is its own independent GitHub project. * The new module can be easily be compiled and used in combination with ITK. 2. The developer writes an Insight Journal article * The module is more visible to the community. * An option can be added to ITK to compile the remote module as part of ITK. ## Creation of a remote module: details * The template project source code is here: https://github.com/InsightSoftwareConsortium/ITKModuleTemplate * Run the following commands: python -m pip install cookiecutter python -m cookiecutter gh:InsightSoftwareConsortium/ITKModuleTemplate * Provide requested information. 
Answer the following questions (Pressing "Enter" will use the default option): full_name [Insight Software Consortium]: email [itk+community@discourse.itk.org]: github_username [itkrobot]: project_name [ITKModuleTemplate]: module_name [ModuleTemplate]: python_package_name [itk-moduletemplate]: download_url [https://github.com/InsightSoftwareConsortium/ITKModuleTemplate]: project_short_description [This is a template that serves as a starting point for a new module.]: project_long_description [ITK is an open-source, cross-platform library that provides developers with an extensive suite of software tools for image analysis. Developed through extreme programming methodologies, ITK employs leading-edge algorithms for registering and segmenting multidimensional scientific images.]: ## New Module Content <pre> (itk) fbudin:ITKModuleTemplate/ $ tree -a . ├── appveyor.yml ├── .circleci │   └── config.yml ├── CMakeLists.txt ├── CTestConfig.cmake ├── include │   ├── itkMinimalStandardRandomVariateGenerator.h │   ├── itkMyFilter.h │   ├── itkMyFilter.hxx │   ├── itkNormalDistributionImageSource.h │   └── itkNormalDistributionImageSource.hxx ├── itk-module.cmake ├── LICENSE ├── README.rst ├── setup.py </pre> <pre> ├── src │ ├── CMakeLists.txt │ └── itkMinimalStandardRandomVariateGenerator.cxx ├── test │ ├── Baseline │ │ ├── itkMyFilterTestOutput.mha.sha512 │ │ └── itkNormalDistributionImageSourceTestOutput.mha.sha512 │ ├── CMakeLists.txt │ ├── itkMinimalStandardRandomVariateGeneratorTest.cxx │ ├── itkMyFilterTest.cxx │ └── itkNormalDistributionImageSourceTest.cxx ├── .travis.yml └── wrapping ├── CMakeLists.txt ├── itkMinimalStandardRandomVariateGenerator.wrap └── itkNormalDistributionImageSource.wrap </pre> ## Directory structure * `src` and `include`: header files and source code * `test`: unit tests * `wrapping`: Required files to automatically create Python bindings. ## Filter code <pre> template< typename TInputImage, typename TOutputImage > void MyFilter< TInputImage, TOutputImage > ::DynamicThreadedGenerateData( const OutputRegionType & outputRegion) { OutputImageType * output = this->GetOutput(); const InputImageType * input = this->GetInput(); using InputRegionType = typename InputImageType::RegionType; InputRegionType inputRegion = InputRegionType(outputRegion.GetSize()); itk::ImageRegionConstIterator<InputImageType> in(input, inputRegion); itk::ImageRegionIterator<OutputImageType> out(output, outputRegion); for (in.GoToBegin(), out.GoToBegin(); !in.IsAtEnd() && !out.IsAtEnd(); ++in, ++out) { out.Set( in.Get() ); } } </pre> ## Continuous integration * Appveyor (Windows) * Travis (MacOS) * CircleCI (Linux) ## Python packages * Automatically generated in the continuous integration platforms. * Download wheel to personal computer * Upload wheel to PyPI (`pip install`). 
## Where to find more information: * ITK Software Guide * [Configuring and building ITK](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch2.html#x22-130002) * [Create a remote module](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch9.html#x55-1640009.7) * [How to write a filter](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch8.html#x54-1330008) * [Iterators](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch6.html#x44-1020006) * [Modules](https://itk.org/ITKSoftwareGuide/html/Book1/ITKSoftwareGuide-Book1ch9.html#x48-1480009) * [Discourse forum](https://discourse.itk.org/) ## Exercises ### Exercise 1: Create the skeleton of a remote module * Hint1: You can run command lines by prefixing them with the symbol '!' * Hint2: You will need to add the argument '--no-input' to the command you are using. This is a limitation due to this notebook environment. ### Exercise 2: Modify the filter * Add a constant value * Multiply by a constant factor ### Enjoy ITK!
0.805096
0.74667
# Gender Wage Gap In this case study we take a look at the impact of gender on weekly wage. A few other variables are also included: education level, geographic location, and years of experience. Note that years of experience has exp1 which appears to be the raw value, and then exp2 and exp3 are transformations (it claims squared and cubic, though I don't really see how that could be true from the values). ``` from itertools import chain, combinations import pandas as pd import statsmodels.formula.api as smf from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso from sklearn.pipeline import Pipeline from interpret import show from interpret.perf import RegressionPerf from interpret.glassbox import ExplainableBoostingRegressor, LinearRegression, RegressionTree data = pd.read_csv("data/wagedata.csv").drop('Unnamed: 0', axis=1) data.head() ``` ## Data Centering & Scaling ``` cols = ['exp1', 'exp2', 'exp3'] clean_data = data.copy() for col in cols: clean_data[f'{col}_c'] = (clean_data[col] - clean_data[col].mean()) / (2*clean_data[col].std()) clean_data.head() ``` ### Data Exploration We want to look at the differences in wages for men and women. ``` clean_data.groupby('female').mean().T clean_data.columns independant_model = 'wage ~ female + cg + sc + hsg + mw + so + we + ne + exp1_c + exp2_c + exp3_c' results = smf.ols(independant_model, data=clean_data).fit() print(results.summary()) features = ['female', 'cg', 'sc', 'hsg', 'mw', 'so', 'we', 'ne', 'exp1_c'] X_train, X_test, y_train, y_test = train_test_split(clean_data[features], clean_data['wage'], test_size=0.20) polyfeatures = PolynomialFeatures(degree=2, interaction_only=True) model = Lasso(alpha=0.1) model_pipeline = Pipeline([("preproc", polyfeatures), ('model', model)]) model_pipeline.fit(X=X_train, y=y_train) dir(model_pipeline) import interpret poly_perf = RegressionPerf(model_pipeline.predict).explain_perf(X_test.values, y_test.values, name='Poly Regression') show(poly_perf) from interpret import show from interpret.data import Marginal marginal = Marginal().explain_data(X_train.values, y_train.values, name = 'Train Data') show(marginal) lr = LinearRegression(random_state=1) lr.fit(X_train.values, y_train.values) lr_perf = RegressionPerf(lr.predict).explain_perf(X_test.values, y_test.values, name='Linear Regression') show(lr_perf) local = lr.explain_local(X_test.values, y_test.values, name='Linear Regression') show(local) global_ = lr.explain_global() show(global_) ```
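As a quick sanity check on the centering and scaling step above, the transformed columns should end up with mean roughly 0 and standard deviation roughly 0.5, since each column was divided by two standard deviations (a small sketch reusing `clean_data`; exact values depend on the data file):

```
print(clean_data[['exp1_c', 'exp2_c', 'exp3_c']].agg(['mean', 'std']).round(3))
```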
github_jupyter
from itertools import chain, combinations import pandas as pd import statsmodels.formula.api as smf from sklearn.preprocessing import PolynomialFeatures from sklearn.model_selection import train_test_split from sklearn.linear_model import Lasso from sklearn.pipeline import Pipeline from interpret import show from interpret.perf import RegressionPerf from interpret.glassbox import ExplainableBoostingRegressor, LinearRegression, RegressionTree data = pd.read_csv("data/wagedata.csv").drop('Unnamed: 0', axis=1) data.head() cols = ['exp1', 'exp2', 'exp3'] clean_data = data.copy() for col in cols: clean_data[f'{col}_c'] = (clean_data[col] - clean_data[col].mean()) / (2*clean_data[col].std()) clean_data.head() clean_data.groupby('female').mean().T clean_data.columns independant_model = 'wage ~ female + cg + sc + hsg + mw + so + we + ne + exp1_c + exp2_c + exp3_c' results = smf.ols(independant_model, data=clean_data).fit() print(results.summary()) features = ['female', 'cg', 'sc', 'hsg', 'mw', 'so', 'we', 'ne', 'exp1_c'] X_train, X_test, y_train, y_test = train_test_split(clean_data[features], clean_data['wage'], test_size=0.20) polyfeatures = PolynomialFeatures(degree=2, interaction_only=True) model = Lasso(alpha=0.1) model_pipeline = Pipeline([("preproc", polyfeatures), ('model', model)]) model_pipeline.fit(X=X_train, y=y_train) dir(model_pipeline) import interpret poly_perf = RegressionPerf(model_pipeline.predict).explain_perf(X_test.values, y_test.values, name='Poly Regression') show(poly_perf) from interpret import show from interpret.data import Marginal marginal = Marginal().explain_data(X_train.values, y_train.values, name = 'Train Data') show(marginal) lr = LinearRegression(random_state=1) lr.fit(X_train.values, y_train.values) lr_perf = RegressionPerf(lr.predict).explain_perf(X_test.values, y_test.values, name='Linear Regression') show(lr_perf) local = lr.explain_local(X_test.values, y_test.values, name='Linear Regression') show(local) global_ = lr.explain_global() show(global_)
0.553264
0.980543
``` from bs4 import BeautifulSoup as BS from selenium import webdriver from functools import reduce import pandas as pd import time import xport import pandas as pd import numpy as np def render_page(url): driver = webdriver.Chrome('/Users/cp/Downloads/chromedriver') driver.get(url) time.sleep(3) r = driver.page_source driver.quit() return r def scraper2(page, dates): output = pd.DataFrame() for d in dates: url = str(str(page) + str(d)) r = render_page(url) soup = BS(r, "html.parser") container = soup.find('lib-city-history-observation') check = container.find('tbody') data = [] for c in check.find_all('tr', class_='ng-star-inserted'): for i in c.find_all('td', class_='ng-star-inserted'): trial = i.text trial = trial.strip(' ') data.append(trial) if len(data)%2 == 0: hour = pd.DataFrame(data[0::10], columns = ['hour']) temp = pd.DataFrame(data[1::10], columns = ['temp']) dew = pd.DataFrame(data[2::10], columns = ['dew']) humidity = pd.DataFrame(data[3::10], columns = ['humidity']) wind_speed = pd.DataFrame(data[5::10], columns = ['wind_speed']) pressure = pd.DataFrame(data[7::10], columns = ['pressure']) precip = pd.DataFrame(data[8::10], columns = ['precip']) cloud = pd.DataFrame(data[9::10], columns = ['cloud']) dfs = [hour, temp,dew,humidity, wind_speed, pressure, precip, cloud] df_final = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), dfs) df_final['Date'] = str(d) + "-" + df_final.iloc[:, :1].astype(str) output = output.append(df_final) print('Scraper done!') output = output[['hour', 'temp', 'dew', 'humidity', 'wind_speed', 'pressure', 'precip', 'cloud']] return output def jan_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2021-1-"+i)) return lst january = jan_dates() def feb_dates(): lst = [] for i in range(1, 30): i = str(i) lst.append(str("2020-2-"+i)) return lst feb = feb_dates() def march_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-3-"+i)) return lst mar = march_dates() def april_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-4-"+i)) return lst april = april_dates() def may_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-5-"+i)) return lst may = may_dates() def june_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-6-"+i)) return lst june = june_dates() def july_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-7-"+i)) return lst july = july_dates() def august_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-8-"+i)) return lst august = august_dates() def october_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-10-"+i)) return lst october = october_dates() def november_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-11-"+i)) return lst november_to7 = november_dates() def december_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-12-"+i)) return lst december = december_dates() def make_year_dates(yr= 2020): if yr%4 ==0: feb_days = 30 else: feb_days = 29 yr = str(yr) january = [] for i in range(1, 32): i = str(i) january.append(f"{yr}-1-{i}") feb = [] for i in range(1, feb_days): i = str(i) feb.append(f"{yr}-2-{i}") march = [] for i in range(1, 32): i = str(i) march.append(f"{yr}-3-{i}") april = [] for i in range(1, 31): i = str(i) april.append(f"{yr}-4-{i}") may = [] for i in range(1, 32): i = str(i) may.append(f"{yr}-5-{i}") june = [] for i in range(1, 31): i = str(i) june.append(f"{yr}-6-{i}") july = [] 
for i in range(1, 32): i = str(i) july.append(f"{yr}-7-{i}") august = [] for i in range(1, 32): i = str(i) august.append(f"{yr}-8-{i}") september = [] for i in range(1, 31): i = str(i) september.append(f"{yr}-9-{i}") october = [] for i in range(1, 32): i = str(i) october.append(f"{yr}-10-{i}") november = [] for i in range(1, 31): i = str(i) november.append(f"{yr}-11-{i}") december = [] for i in range(1, 32): i = str(i) december.append(f"{yr}-12-{i}") yr_total = january+feb+ march+ april+ may+ june + july + august+ september+ october+ november+ december return yr_total ?january, feb, march, april, may, june, july, august, september, october, november, december make_year_dates(yr= 2019) ```
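The twelve hand-written month helpers above can also be collapsed into one call with `pandas`, which already knows month lengths and leap years. A sketch of that alternative (hypothetical helper name; it produces the same unpadded `YYYY-M-D` strings the scraper URLs expect):

```
def make_year_dates_pd(yr=2020):
    # Every calendar day of the year, formatted without zero padding (e.g. "2020-1-1").
    days = pd.date_range(start=f"{yr}-01-01", end=f"{yr}-12-31", freq="D")
    return [f"{d.year}-{d.month}-{d.day}" for d in days]

make_year_dates_pd(2019)[:5]
```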
github_jupyter
from bs4 import BeautifulSoup as BS from selenium import webdriver from functools import reduce import pandas as pd import time import xport import pandas as pd import numpy as np def render_page(url): driver = webdriver.Chrome('/Users/cp/Downloads/chromedriver') driver.get(url) time.sleep(3) r = driver.page_source driver.quit() return r def scraper2(page, dates): output = pd.DataFrame() for d in dates: url = str(str(page) + str(d)) r = render_page(url) soup = BS(r, "html.parser") container = soup.find('lib-city-history-observation') check = container.find('tbody') data = [] for c in check.find_all('tr', class_='ng-star-inserted'): for i in c.find_all('td', class_='ng-star-inserted'): trial = i.text trial = trial.strip(' ') data.append(trial) if len(data)%2 == 0: hour = pd.DataFrame(data[0::10], columns = ['hour']) temp = pd.DataFrame(data[1::10], columns = ['temp']) dew = pd.DataFrame(data[2::10], columns = ['dew']) humidity = pd.DataFrame(data[3::10], columns = ['humidity']) wind_speed = pd.DataFrame(data[5::10], columns = ['wind_speed']) pressure = pd.DataFrame(data[7::10], columns = ['pressure']) precip = pd.DataFrame(data[8::10], columns = ['precip']) cloud = pd.DataFrame(data[9::10], columns = ['cloud']) dfs = [hour, temp,dew,humidity, wind_speed, pressure, precip, cloud] df_final = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), dfs) df_final['Date'] = str(d) + "-" + df_final.iloc[:, :1].astype(str) output = output.append(df_final) print('Scraper done!') output = output[['hour', 'temp', 'dew', 'humidity', 'wind_speed', 'pressure', 'precip', 'cloud']] return output def jan_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2021-1-"+i)) return lst january = jan_dates() def feb_dates(): lst = [] for i in range(1, 30): i = str(i) lst.append(str("2020-2-"+i)) return lst feb = feb_dates() def march_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-3-"+i)) return lst mar = march_dates() def april_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-4-"+i)) return lst april = april_dates() def may_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-5-"+i)) return lst may = may_dates() def june_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-6-"+i)) return lst june = june_dates() def july_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-7-"+i)) return lst july = july_dates() def august_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-8-"+i)) return lst august = august_dates() def october_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-10-"+i)) return lst october = october_dates() def november_dates(): lst = [] for i in range(1, 31): i = str(i) lst.append(str("2020-11-"+i)) return lst november_to7 = november_dates() def december_dates(): lst = [] for i in range(1, 32): i = str(i) lst.append(str("2020-12-"+i)) return lst december = december_dates() def make_year_dates(yr= 2020): if yr%4 ==0: feb_days = 30 else: feb_days = 29 yr = str(yr) january = [] for i in range(1, 32): i = str(i) january.append(f"{yr}-1-{i}") feb = [] for i in range(1, feb_days): i = str(i) feb.append(f"{yr}-2-{i}") march = [] for i in range(1, 32): i = str(i) march.append(f"{yr}-3-{i}") april = [] for i in range(1, 31): i = str(i) april.append(f"{yr}-4-{i}") may = [] for i in range(1, 32): i = str(i) may.append(f"{yr}-5-{i}") june = [] for i in range(1, 31): i = str(i) june.append(f"{yr}-6-{i}") july = [] for 
i in range(1, 32): i = str(i) july.append(f"{yr}-7-{i}") august = [] for i in range(1, 32): i = str(i) august.append(f"{yr}-8-{i}") september = [] for i in range(1, 31): i = str(i) september.append(f"{yr}-9-{i}") october = [] for i in range(1, 32): i = str(i) october.append(f"{yr}-10-{i}") november = [] for i in range(1, 31): i = str(i) november.append(f"{yr}-11-{i}") december = [] for i in range(1, 32): i = str(i) december.append(f"{yr}-12-{i}") yr_total = january+feb+ march+ april+ may+ june + july + august+ september+ october+ november+ december return yr_total ?january, feb, march, april, may, june, july, august, september, october, november, december make_year_dates(yr= 2019)
0.213295
0.160233
# 8.3 Singular value decomposition with python In this section we will decompose a matrix into its singular value decomposition using python 3. This process is pretty simple. To get a matrix's singular value decomposition we can use scipy's package. Specifically: `from scipy.linalg import svd` We will also need numpy too. ``` import numpy as np ``` Suppose our matrix is the same as the hand-written example, i.e. $$ A = \begin{bmatrix} 1 & 0 \\ 1 & -1 \\ 0 & 1\end{bmatrix}$$ ``` A = np.array([[1,0],[1,-1],[0,1]]) A ``` To find the decomposition of A, we use the scipy function svd. ``` from scipy.linalg import svd U, s, VT = svd(A) ``` This is the matrix U ``` U ``` The object s consists of the singular values. The output is not the matrix, just the values! Note if your matrix U has values such as -5.55111512e-17, just take it to be zero since the number is so small. ``` s ``` This is the matrix V.T (the transpose of V) ``` VT ``` It's that simple! Now one thing we have to keep in mind with forming the matrix Sigma is that it is the same shape as the original matrix, only with the singular values on its diagonal entries. Thus to form the matrix sigma, we have to first form an empty matrix of the same shape as the original matrix A, and then input the singular values on the diagonal. Hence we want a matrix sigma of the form (since A is 3x2): $$ \Sigma = \begin{bmatrix} \sqrt{3} & 0 \\ 0 & 1 \\ 0 & 0 \end{bmatrix}$$ which in python is of the form: `np.array([[1.73205081,0],[0,1],[0,0]])` This is done in several steps: 1. Create an empty mxn shaped matrix using numpy's zeros function. This is matrix sigma. 2. Set the diagonal entries of sigma equal to the singular values returned by svd. The positions in sigma are dependent upon the original shape of A (see below). To check our work, we use the numpy dot function to multiply matrices ``` from numpy import zeros from numpy import dot m,n = A.shape[0], A.shape[1] sigma = np.zeros((m, n)) for i in range(min(m, n)): sigma[i, i] = s[i] a1 = np.dot(U, np.dot(sigma, VT)) print('The original input: \n',A) print('\n') print('The matrix after inputting: \n',np.around(a1,1)) ``` We get the same thing (ignore any negative symbols associated with zeros). # Exercises For the following matrices, find the singular values using python Problem 1. $$ \begin{bmatrix} 1 & 0 & 1\\ 1 & -1 & 1\\ 0 & 1 &0\end{bmatrix}$$ Problem 2. $$ \begin{bmatrix} 1 & 2 \\ 2 & 1\end{bmatrix}$$ Problem 3. $$ \begin{bmatrix} 1 & 1 \\ 1 & 1\\ 0 & 0\end{bmatrix}$$ Problem 4. $$ \begin{bmatrix} 1 & -1 \\ 2 & 3\end{bmatrix}$$ Problem 5. $$ \begin{bmatrix} 1 & 1 & 3 \\ 1 & -1 & 3\end{bmatrix}$$ Problem 6. Write a Python function that does the following: * Inputs any matrix * Finds and prints its singular values * Forms the matrices U, V.T and sigma (and prints them in their proper shape) * Returns a product of U, V.T and sigma that's equal to the original matrix. Problems 7-11: Use the function in problem 6 to input the matrices of problems 1-5 and have an equivalent output (input=output)
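For the exercises it may help to know that `scipy.linalg` also provides `diagsvd`, which builds the Sigma matrix directly from the singular values and can replace the zeros-plus-loop construction above (a short sketch on the same matrix A):

```
from scipy.linalg import svd, diagsvd
import numpy as np

A = np.array([[1, 0], [1, -1], [0, 1]])
U, s, VT = svd(A)
# diagsvd places the singular values on the diagonal of an m x n matrix.
sigma = diagsvd(s, A.shape[0], A.shape[1])
print(np.allclose(A, np.dot(U, np.dot(sigma, VT))))  # True when the reconstruction matches A
```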
github_jupyter
import numpy as np A = np.array([[1,0],[1,-1],[0,1]]) A from scipy.linalg import svd U, s, VT = svd(A) U s VT from numpy import zeros from numpy import dot m,n = A.shape[0], A.shape[1] sigma = np.zeros((m, n)) for i in range(min(m, n)): sigma[i, i] = s[i] a1 = np.dot(U, np.dot(sigma, VT)) print('The original input: \n',A) print('\n') print('The matrix after inputting: \n',np.around(a1,1))
0.271831
0.994536
## Get the Data Either use the provided .csv file or (optionally) get fresh (the freshest?) data from running an SQL query on StackExchange: Follow this link to run the query from [StackExchange](https://data.stackexchange.com/stackoverflow/query/675441/popular-programming-languages-per-over-time-eversql-com) to get your own .csv file <code> select dateadd(month, datediff(month, 0, q.CreationDate), 0) m, TagName, count(*) from PostTags pt join Posts q on q.Id=pt.PostId join Tags t on t.Id=pt.TagId where TagName in ('java','c','c++','python','c#','javascript','assembly','php','perl','ruby','visual basic','swift','r','object-c','scratch','go','swift','delphi') and q.CreationDate < dateadd(month, datediff(month, 0, getdate()), 0) group by dateadd(month, datediff(month, 0, q.CreationDate), 0), TagName order by dateadd(month, datediff(month, 0, q.CreationDate), 0) </code> ## Import Statements ``` import pandas as pd import matplotlib.pyplot as plt ``` ## Data Exploration **Challenge**: Read the .csv file and store it in a Pandas dataframe ``` df = pd.read_csv("QueryResults.csv", names=["DATE", "LANG", "POST"], header=0) ``` **Challenge**: Examine the first 5 rows and the last 5 rows of the of the dataframe ``` print(df.head()) print(df.tail()) ``` **Challenge:** Check how many rows and how many columns there are. What are the dimensions of the dataframe? ``` df.shape ``` **Challenge**: Count the number of entries in each column of the dataframe ``` print(df.columns) print(df["DATE"].count()) print(df["LANG"].count()) print(df["POST"].count()) ``` **Challenge**: Calculate the total number of post per language. Which Programming language has had the highest total number of posts of all time? ``` df.groupby("LANG").sum() ``` Some languages are older (e.g., C) and other languages are newer (e.g., Swift). The dataset starts in September 2008. **Challenge**: How many months of data exist per language? Which language had the fewest months with an entry? ``` print(df.groupby("LANG")["DATE"].count()) print(df.groupby("LANG").DATE.count()) ``` ## Data Cleaning Let's fix the date format to make it more readable. We need to use Pandas to change format from a string of "2008-07-01 00:00:00" to a datetime object with the format of "2008-07-01" ``` from datetime import datetime date_pandas = df['DATE'] date_python = [date.split(" ")[0] for date in date_pandas] date_object = [datetime.strptime(date.split(" ")[0], '%Y-%m-%d') for date in date_pandas] date_date = [object.date() for object in date_object] print(date_date[0]) print(df["DATE"]) print(df.DATE) df.DATE = pd.to_datetime(df.DATE) df.DATE ``` ## Data Manipulation ``` print(df.shape) print(df.columns) print(df.head()) ``` **Challenge**: What are the dimensions of our new dataframe? How many rows and columns does it have? Print out the column names and print out the first 5 rows of the dataframe. ``` df.shape print(df.columns) print(df.head()) df.isna().values.any() ``` **Challenge**: Count the number of entries per programming language. Why might the number of entries be different? 
``` test_df = pd.DataFrame({'Age': ['Young', 'Young', 'Young', 'Young', 'Old', 'Old', 'Old', 'Old'], 'Actor': ['Jack', 'Arnold', 'Keanu', 'Sylvester', 'Jack', 'Arnold', 'Keanu', 'Sylvester'], 'Power': [100, 80, 25, 50, 99, 75, 5, 30]}) test_df pivoted_df = test_df.pivot(index="Age", columns="Actor", values="Power") pivoted_df reshaped_df = df.pivot(index="DATE", columns="LANG", values="POST") print(reshaped_df.shape) print(reshaped_df.head()) print(reshaped_df.tail()) print(reshaped_df.columns) print(reshaped_df.count()) print(reshaped_df.isna().values.any()) reshaped_df.fillna(value=0, inplace=True) print(reshaped_df.count()) print(reshaped_df.head()) print(reshaped_df.tail()) reshaped_df.isna().values.any() ``` ## Data Visualisaton with with Matplotlib **Challenge**: Use the [matplotlib documentation](https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot) to plot a single programming language (e.g., java) on a chart. ``` xpoints_python = df.loc[df["LANG"] == "python", "DATE"] ypoints_python = df.loc[df["LANG"] == "python", "POST"] xpoints_java = df.loc[df["LANG"] == "java", "DATE"] ypoints_java = df.loc[df["LANG"] == "java", "POST"] xpoints_go = df.loc[df["LANG"] == "go", "DATE"] ypoints_go = df.loc[df["LANG"] == "go", "POST"] xpoints_javascript = df.loc[df["LANG"] == "javascript", "DATE"] ypoints_javascript = df.loc[df["LANG"] == "javascript", "POST"] xpoints_ruby = df.loc[df["LANG"] == "ruby", "DATE"] ypoints_ruby = df.loc[df["LANG"] == "ruby", "POST"] xpoints_php = df.loc[df["LANG"] == "php", "DATE"] ypoints_php = df.loc[df["LANG"] == "php", "POST"] xaxis_python = reshaped_df.index yaxis_python = reshaped_df.python xaxis_java = reshaped_df.index yaxis_java = reshaped_df.java xaxis_go = reshaped_df.index yaxis_go = reshaped_df.go xaxis_javascript = reshaped_df.index yaxis_javascript = reshaped_df.javascript xaxis_ruby = reshaped_df.index yaxis_ruby = reshaped_df.ruby xaxis_php = reshaped_df.index yaxis_php = reshaped_df.php plt.figure(figsize=(16, 10)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.plot(xaxis_python, yaxis_python, label="python") plt.plot(xpoints_java, ypoints_java, label="java") plt.legend() plt.show plt.figure(figsize=(16, 10)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylim(0, 40000) for language in reshaped_df.columns: plt.plot(reshaped_df.index, reshaped_df[language], label=reshaped_df[language].name) plt.legend(fontsize=16) plt.show ``` **Challenge**: Show two line (e.g. for Java and Python) on the same chart. ``` roll_df = reshaped_df.rolling(window=6).mean() plt.figure(figsize=(14, 8)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylim(0, 40000) for language in roll_df.columns: plt.plot(roll_df.index, roll_df[language], label=roll_df[language].name, linewidth=3) plt.legend(fontsize=16) plt.show() ``` # Smoothing out Time Series Data Time series data can be quite noisy, with a lot of up and down spikes. To better see a trend we can plot an average of, say 6 or 12 observations. This is called the rolling mean. We calculate the average in a window of time and move it forward by one overservation. 
Pandas has two handy methods already built in to work this out: [rolling()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html) and [mean()](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.core.window.rolling.Rolling.mean.html). ``` ```
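To see what the rolling mean does on a small scale (an illustrative toy example, separate from the Stack Overflow data):

```
# With window=6, each value is averaged with the five values before it,
# so the first five entries are NaN until the window is full.
toy = pd.Series([10, 20, 30, 40, 50, 60, 70])
print(toy.rolling(window=6).mean())
```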
github_jupyter
import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv("QueryResults.csv", names=["DATE", "LANG", "POST"], header=0) print(df.head()) print(df.tail()) df.shape print(df.columns) print(df["DATE"].count()) print(df["LANG"].count()) print(df["POST"].count()) df.groupby("LANG").sum() print(df.groupby("LANG")["DATE"].count()) print(df.groupby("LANG").DATE.count()) from datetime import datetime date_pandas = df['DATE'] date_python = [date.split(" ")[0] for date in date_pandas] date_object = [datetime.strptime(date.split(" ")[0], '%Y-%m-%d') for date in date_pandas] date_date = [object.date() for object in date_object] print(date_date[0]) print(df["DATE"]) print(df.DATE) df.DATE = pd.to_datetime(df.DATE) df.DATE print(df.shape) print(df.columns) print(df.head()) df.shape print(df.columns) print(df.head()) df.isna().values.any() test_df = pd.DataFrame({'Age': ['Young', 'Young', 'Young', 'Young', 'Old', 'Old', 'Old', 'Old'], 'Actor': ['Jack', 'Arnold', 'Keanu', 'Sylvester', 'Jack', 'Arnold', 'Keanu', 'Sylvester'], 'Power': [100, 80, 25, 50, 99, 75, 5, 30]}) test_df pivoted_df = test_df.pivot(index="Age", columns="Actor", values="Power") pivoted_df reshaped_df = df.pivot(index="DATE", columns="LANG", values="POST") print(reshaped_df.shape) print(reshaped_df.head()) print(reshaped_df.tail()) print(reshaped_df.columns) print(reshaped_df.count()) print(reshaped_df.isna().values.any()) reshaped_df.fillna(value=0, inplace=True) print(reshaped_df.count()) print(reshaped_df.head()) print(reshaped_df.tail()) reshaped_df.isna().values.any() xpoints_python = df.loc[df["LANG"] == "python", "DATE"] ypoints_python = df.loc[df["LANG"] == "python", "POST"] xpoints_java = df.loc[df["LANG"] == "java", "DATE"] ypoints_java = df.loc[df["LANG"] == "java", "POST"] xpoints_go = df.loc[df["LANG"] == "go", "DATE"] ypoints_go = df.loc[df["LANG"] == "go", "POST"] xpoints_javascript = df.loc[df["LANG"] == "javascript", "DATE"] ypoints_javascript = df.loc[df["LANG"] == "javascript", "POST"] xpoints_ruby = df.loc[df["LANG"] == "ruby", "DATE"] ypoints_ruby = df.loc[df["LANG"] == "ruby", "POST"] xpoints_php = df.loc[df["LANG"] == "php", "DATE"] ypoints_php = df.loc[df["LANG"] == "php", "POST"] xaxis_python = reshaped_df.index yaxis_python = reshaped_df.python xaxis_java = reshaped_df.index yaxis_java = reshaped_df.java xaxis_go = reshaped_df.index yaxis_go = reshaped_df.go xaxis_javascript = reshaped_df.index yaxis_javascript = reshaped_df.javascript xaxis_ruby = reshaped_df.index yaxis_ruby = reshaped_df.ruby xaxis_php = reshaped_df.index yaxis_php = reshaped_df.php plt.figure(figsize=(16, 10)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.plot(xaxis_python, yaxis_python, label="python") plt.plot(xpoints_java, ypoints_java, label="java") plt.legend() plt.show plt.figure(figsize=(16, 10)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylim(0, 40000) for language in reshaped_df.columns: plt.plot(reshaped_df.index, reshaped_df[language], label=reshaped_df[language].name) plt.legend(fontsize=16) plt.show roll_df = reshaped_df.rolling(window=6).mean() plt.figure(figsize=(14, 8)) plt.xlabel("Date", fontsize=18) plt.ylabel("Number of Post", fontsize=18) plt.xticks(fontsize=14) plt.yticks(fontsize=14) plt.ylim(0, 40000) for language in roll_df.columns: plt.plot(roll_df.index, roll_df[language], label=roll_df[language].name, linewidth=3) plt.legend(fontsize=16) 
plt.show()
0.374104
0.879354
# WeatherPy ---- #### Note * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. ``` # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time import json import gmaps import scipy.stats as st from scipy.stats import linregress # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) ``` ## Generate Cities List ``` # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) ``` ### Perform API Calls * Perform a weather check on each city using a series of successive API calls. * Include a print log of each city as it'sbeing processed (with the city number and city name). ``` url = 'http://api.openweathermap.org/data/2.5/weather?' query_url = url + 'appid=' + weather_api_key + '&units=imperial&q=' weather_api_key query_url city_names = [] countries = [] latitudes = [] longitudes = [] max_temps = [] humidity = [] cloudiness = [] wind_speeds = [] # Set start for loop count count = 1 for city in cities: weather_response = requests.get(query_url + city).json() print(weather_response) print(f'Retrieving response {count} out of {len(cities)}: {city}') try: # Add data to lists city_names.append(weather_response['name']) countries.append(weather_response['sys']['country']) latitudes.append(weather_response['coord']['lat']) longitudes.append(weather_response['coord']['lon']) max_temps.append(weather_response['main']['temp_max']) humidity.append(weather_response['main']['humidity']) cloudiness.append(weather_response['clouds']['all']) wind_speeds.append(weather_response['wind']['speed']) except KeyError: print(f'data for {city} seems to be not found') count +=1 ``` ### Convert Raw Data to DataFrame * Export the city data into a .csv. * Display the DataFrame ``` weather = { 'City Name': city_names, 'Country': countries, 'Latitude': latitudes, 'Longitude': longitudes, 'Max Temperature (*F)': max_temps, 'Humidity (%)': humidity, 'Cloudiness (%)': cloudiness, 'Wind Speed (MPH)': wind_speeds, } raw_weather_df = pd.DataFrame(weather) raw_weather_df weather_df = raw_weather_df.dropna() weather_df.count() weather_df weather_df.to_csv('output_data.csv',index=False) ``` ## Inspect the data and remove the cities where the humidity > 100%. ---- Skip this step if there are no cities that have humidity > 100%. ``` # Get the indices of cities that have humidity over 100%. humid_count = 0 for humid in humidity: if humid > 99: humid_count += 1 humid_count # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". 
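# Hypothetical completion (not in the original notebook): one way to build the
# "clean_city_data" copy described above, keeping only rows with humidity at or below 100%.
clean_city_data = weather_df[weather_df['Humidity (%)'] <= 100].copy()
clean_city_data.count()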
``` ## Plotting the Data * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels. * Save the plotted figures as .pngs. ## Latitude vs. Temperature Plot ``` plt.figure(figsize=(10,10)) plt.scatter(latitudes, max_temps) plt.title('Latitude vs. Temperature') plt.xlabel('Latitude') plt.ylabel('Max Temperature (F)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` ## Latitude vs. Humidity Plot ``` plt.figure(figsize=(10,10)) plt.scatter(latitudes, humidity) plt.title('Latitude vs. Humidity') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') # Format plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` ## Latitude vs. Cloudiness Plot ``` plt.figure(figsize=(7,5)) plt.scatter(latitudes, cloudiness) plt.title('Latitude vs. Cloudiness') plt.xlabel('Latitude') plt.ylabel('Cloudiness (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` ## Latitude vs. Wind Speed Plot ``` plt.figure(figsize=(7,5)) plt.scatter(latitudes, wind_speeds) plt.title('Latitude vs. Wind Speed') plt.xlabel('Latitude') plt.ylabel('Wind Speed (MPH)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` ## Linear Regression ``` nh_df = weather_df[weather_df['Latitude'] > 0] sh_df = weather_df[weather_df['Latitude'] < 0] nh_lat = nh_df['Latitude'] sh_lat = sh_df['Latitude'] ``` #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression ``` #plot nh max temp vs latitude title = 'Northern Hemisphere - Max Temp (F) vs. Latitude' n_temp = nh_df['Max Temperature (*F)'] plt.figure(figsize=(10,10)) plt.scatter(nh_lat, n_temp, zorder=3) (slope, intercept, rvalue, pvalue, stderr) = linregress(nh_lat, n_temps) n_regress_values1 = nh_lat * slope + intercept n_equation = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) # Plot linear regression plt.plot(nh_lat, n_regress_values1, 'blue', lw=2) # Annotate plt.annotate(n_equation, (5,-10), color="red") # Label plt.title('Northern Hemisphere Max Temp vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Max Temperature') # Format plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression ``` #plot nh max temp vs latitude title = 'Northern Hemisphere - Max Temp vs. Latitude' s_temp = sh_df['Max Temperature (*F)'] plt.figure(figsize=(10,10)) plt.scatter(sh_lat, s_temp, zorder=3) (slope, intercept, rvalue, pvalue, stderr) = linregress(nh_lat, n_temps) s_regress_values1 = sh_lat * slope + intercept n_equation = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(sh_lat, s_regress_values1, 'blue', lw=2) plt.annotate(n_equation, (5,-10), color="red") plt.title('Southern Hemisphere Max Temp vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Max Temperature') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression ``` n_humid = nh_df['Humidity (%)'] plt.figure(figsize=(10,10)) plt.scatter(nh_lat, n_humid) (slope, intercept, rvalue, pvalue, stderr) = linregress(nh_lat, n_humid) n_regress_values2 = nh_lat * slope + intercept n_equation2 = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(nh_lat, n_regress_values2, 'blue', lw=2) plt.annotate(n_equation2, (60,10), color="red") plt.title('Northern Hemisphere Humidity vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` #### Southern Hemisphere - Humidity (%) vs. 
Latitude Linear Regression ``` s_humid = sh_df['Humidity (%)'] plt.figure(figsize=(10,10)) plt.scatter(sh_lat, s_humid) (slope, intercept, rvalue, pvalue, stderr) = linregress(sh_lat, s_humid) s_regress_values2 = sh_lat * slope + intercept s_equation2 = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(sh_lat, s_regress_values2, 'blue', lw=2) plt.annotate(s_equation2, (20,60), color="red") plt.title('Southern Hemisphere Humidity vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() ``` #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
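The remaining cloudiness and wind-speed sections repeat the same scatter-plus-fit pattern, so a small helper is sketched below (an illustrative refactor, not part of the original notebook); it takes one hemisphere DataFrame and a column name and reuses `linregress` from the imports above:

```
def plot_lat_regression(hemi_df, column, title):
    # Scatter the column against latitude and overlay a least-squares fit line.
    x = hemi_df['Latitude']
    y = hemi_df[column]
    slope, intercept, rvalue, pvalue, stderr = linregress(x, y)
    plt.figure(figsize=(7, 5))
    plt.scatter(x, y)
    plt.plot(x, x * slope + intercept, 'r', lw=2)
    plt.title(title)
    plt.xlabel('Latitude')
    plt.ylabel(column)
    plt.show()

plot_lat_regression(nh_df, 'Cloudiness (%)', 'Northern Hemisphere Cloudiness vs. Latitude')
plot_lat_regression(sh_df, 'Cloudiness (%)', 'Southern Hemisphere Cloudiness vs. Latitude')
plot_lat_regression(nh_df, 'Wind Speed (MPH)', 'Northern Hemisphere Wind Speed vs. Latitude')
plot_lat_regression(sh_df, 'Wind Speed (MPH)', 'Southern Hemisphere Wind Speed vs. Latitude')
```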
github_jupyter
# Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time import json import gmaps import scipy.stats as st from scipy.stats import linregress # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) url = 'http://api.openweathermap.org/data/2.5/weather?' query_url = url + 'appid=' + weather_api_key + '&units=imperial&q=' weather_api_key query_url city_names = [] countries = [] latitudes = [] longitudes = [] max_temps = [] humidity = [] cloudiness = [] wind_speeds = [] # Set start for loop count count = 1 for city in cities: weather_response = requests.get(query_url + city).json() print(weather_response) print(f'Retrieving response {count} out of {len(cities)}: {city}') try: # Add data to lists city_names.append(weather_response['name']) countries.append(weather_response['sys']['country']) latitudes.append(weather_response['coord']['lat']) longitudes.append(weather_response['coord']['lon']) max_temps.append(weather_response['main']['temp_max']) humidity.append(weather_response['main']['humidity']) cloudiness.append(weather_response['clouds']['all']) wind_speeds.append(weather_response['wind']['speed']) except KeyError: print(f'data for {city} seems to be not found') count +=1 weather = { 'City Name': city_names, 'Country': countries, 'Latitude': latitudes, 'Longitude': longitudes, 'Max Temperature (*F)': max_temps, 'Humidity (%)': humidity, 'Cloudiness (%)': cloudiness, 'Wind Speed (MPH)': wind_speeds, } raw_weather_df = pd.DataFrame(weather) raw_weather_df weather_df = raw_weather_df.dropna() weather_df.count() weather_df weather_df.to_csv('output_data.csv',index=False) # Get the indices of cities that have humidity over 100%. humid_count = 0 for humid in humidity: if humid > 99: humid_count += 1 humid_count # Make a new DataFrame equal to the city data to drop all humidity outliers by index. # Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data". plt.figure(figsize=(10,10)) plt.scatter(latitudes, max_temps) plt.title('Latitude vs. Temperature') plt.xlabel('Latitude') plt.ylabel('Max Temperature (F)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() plt.figure(figsize=(10,10)) plt.scatter(latitudes, humidity) plt.title('Latitude vs. Humidity') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') # Format plt.grid(alpha=0.5) plt.tight_layout() plt.show() plt.figure(figsize=(7,5)) plt.scatter(latitudes, cloudiness) plt.title('Latitude vs. Cloudiness') plt.xlabel('Latitude') plt.ylabel('Cloudiness (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() plt.figure(figsize=(7,5)) plt.scatter(latitudes, wind_speeds) plt.title('Latitude vs. 
Wind Speed') plt.xlabel('Latitude') plt.ylabel('Wind Speed (MPH)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() nh_df = weather_df[weather_df['Latitude'] > 0] sh_df = weather_df[weather_df['Latitude'] < 0] nh_lat = nh_df['Latitude'] sh_lat = sh_df['Latitude'] #plot nh max temp vs latitude title = 'Northern Hemisphere - Max Temp (F) vs. Latitude' n_temp = nh_df['Max Temperature (*F)'] plt.figure(figsize=(10,10)) plt.scatter(nh_lat, n_temp, zorder=3) (slope, intercept, rvalue, pvalue, stderr) = linregress(nh_lat, n_temp) n_regress_values1 = nh_lat * slope + intercept n_equation = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) # Plot linear regression plt.plot(nh_lat, n_regress_values1, 'blue', lw=2) # Annotate plt.annotate(n_equation, (5,-10), color="red") # Label plt.title('Northern Hemisphere Max Temp vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Max Temperature') # Format plt.grid(alpha=0.5) plt.tight_layout() plt.show() #plot sh max temp vs latitude title = 'Southern Hemisphere - Max Temp (F) vs. Latitude' s_temp = sh_df['Max Temperature (*F)'] plt.figure(figsize=(10,10)) plt.scatter(sh_lat, s_temp, zorder=3) (slope, intercept, rvalue, pvalue, stderr) = linregress(sh_lat, s_temp) s_regress_values1 = sh_lat * slope + intercept s_equation1 = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(sh_lat, s_regress_values1, 'blue', lw=2) plt.annotate(s_equation1, (5,-10), color="red") plt.title('Southern Hemisphere Max Temp vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Max Temperature') plt.grid(alpha=0.5) plt.tight_layout() plt.show() n_humid = nh_df['Humidity (%)'] plt.figure(figsize=(10,10)) plt.scatter(nh_lat, n_humid) (slope, intercept, rvalue, pvalue, stderr) = linregress(nh_lat, n_humid) n_regress_values2 = nh_lat * slope + intercept n_equation2 = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(nh_lat, n_regress_values2, 'blue', lw=2) plt.annotate(n_equation2, (60,10), color="red") plt.title('Northern Hemisphere Humidity vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show() s_humid = sh_df['Humidity (%)'] plt.figure(figsize=(10,10)) plt.scatter(sh_lat, s_humid) (slope, intercept, rvalue, pvalue, stderr) = linregress(sh_lat, s_humid) s_regress_values2 = sh_lat * slope + intercept s_equation2 = 'y = ' + str(round(slope, 2)) + 'x + ' + str(round(intercept, 2)) plt.plot(sh_lat, s_regress_values2, 'blue', lw=2) plt.annotate(s_equation2, (20,60), color="red") plt.title('Southern Hemisphere Humidity vs. Latitude') plt.xlabel('Latitude') plt.ylabel('Humidity (%)') plt.grid(alpha=0.5) plt.tight_layout() plt.show()
0.553264
0.814717
# Basic Example using the pattern classifier

```
import sys
sys.path.append("../")

%load_ext autoreload
%autoreload 2

import pandas as pd
import numpy as np
import sklearn.metrics
from modules.utils import settings
from sklearn import preprocessing
import joblib

from modules.pattern_classifier import SimpleClassifier, PatternVectorizer

# Load annotated tweets
col = ["text", "emo1", "emo2", "emo3"]
filename = 'data/datasets/annotated_tweets.tsv'
tweets = pd.read_table(filename, header=None, names=col)
```

## 1. Load Vectorizer and Classifier Instance

Load a PatternVectorizer `pv` and a SimpleClassifier `cls` instance from a folder containing, for each class, a file of patterns with their corresponding scores.

```
emo_pv, emo_clf = SimpleClassifier.load_from_folder('data/datasets/emotion_patterns/8_emos/')
```

## Optional: Persist Classifier Instance to disk

```
joblib.dump(emo_clf, settings.EMO_CLF, compress=True)
joblib.dump(emo_pv, settings.EMO_PV, compress=True)

new_clf = joblib.load(settings.EMO_CLF)
new_pv = joblib.load(settings.EMO_PV)
# You can load the models here, comment the dumps and change the variable names from cls and pv to new_cls
# and new_pv respectively
```

## 2. Vectorize the tweets

[tweet1, tweet2 ...] -> [[O11, O12, ...], [O21, O22, ...] ...]

Oij represents the number of occurrences of the jth pattern in the ith tweet.

```
documentPatternVectors = new_pv.transform(tweets.text)
documentPatternVectors[:5]
```

## 3. Classify tweets

```
# using one emotion, guess the class with the smallest rank
Y_GUESS_1 = new_clf.get_min_score_class(documentPatternVectors)
Y_GUESS_1[:5]

# using two emotions, guess the two classes with the smallest ranks
Y_GUESS_2 = new_clf.get_top_classes(documentPatternVectors, ascending=True, n=2)
Y_GUESS_2[:5]
```

## 4. Evaluate your model

```
le = preprocessing.LabelEncoder()
le.fit(new_clf.classes)

Y = le.transform(tweets.emo1.values.tolist())
Y_GUESS_1 = le.transform(Y_GUESS_1)

print(sklearn.metrics.classification_report(Y, Y_GUESS_1, target_names = le.classes_))
print('Accuracy:')
print(sklearn.metrics.accuracy_score(Y, Y_GUESS_1))

# Average Jaccard similarity of the two guesses with the annotated labels
tweets = tweets.fillna('None')
tweets['emotions'] = tweets.apply(lambda t: set((t.emo1 , t.emo2, t.emo3)) - {'None'}, axis=1)
nb_tweets = len(tweets.emotions.values)
sum([len(tweets.emotions[i] & set(Y_GUESS_2[i])) / len(tweets.emotions[i] | set(Y_GUESS_2[i])) for i in range(nb_tweets)]) / nb_tweets

# Guess-2 accuracy: the first annotated emotion is among the two guesses
sum([tweets.emo1[i] in set(Y_GUESS_2[i]) for i in range(nb_tweets)]) / nb_tweets

# Accuracy counting positive if there is an intersection between the two guesses and the labels
sum([len(tweets.emotions[i] & set(Y_GUESS_2[i])) > 0 for i in range(nb_tweets)]) / nb_tweets
```
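To make the Oij description in section 2 concrete, here is a toy illustration of occurrence counting on made-up patterns. It only mimics the shape of `PatternVectorizer.transform`'s output and is not the actual implementation.

```
# Illustration only: naive occurrence counts over hypothetical patterns.
patterns = ["so happy", "i hate", "can't wait"]          # made-up patterns
texts = ["so happy today, so happy!", "i hate mondays"]  # made-up tweets

def count_occurrences(texts, patterns):
    # O[i][j] = number of occurrences of patterns[j] in texts[i]
    return [[t.lower().count(p) for p in patterns] for t in texts]

count_occurrences(texts, patterns)
# [[2, 0, 0], [0, 1, 0]]
```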
github_jupyter
import sys sys.path.append("../") %load_ext autoreload %autoreload 2 import pandas as pd import numpy as np import sklearn.metrics from modules.utils import settings from sklearn import preprocessing import joblib from modules.pattern_classifier import SimpleClassifier, PatternVectorizer # Load annotated tweets col = ["text", "emo1", "emo2", "emo3"] filename = 'data/datasets/annotated_tweets.tsv' tweets = pd.read_table(filename, header=None, names=col) emo_pv, emo_clf = SimpleClassifier.load_from_folder('data/datasets/emotion_patterns/8_emos/') joblib.dump(emo_clf, settings.EMO_CLF, compress=True) joblib.dump(emo_pv, settings.EMO_PV, compress=True) new_clf = joblib.load(settings.EMO_CLF) new_pv = joblib.load(settings.EMO_PV) # You can load the models here, comment the dumps and change the variable names from cls and pv to new_cls # and new_pv respectively documentPatternVectors = new_pv.transform(tweets.text) documentPatternVectors[:5] # using one emotion, guess the one with the smallest rank Y_GUESS_1 = new_clf.get_min_score_class(documentPatternVectors) Y_GUESS_1[:5] # using two emotion, guess the one with the smallest rank Y_GUESS_2 = new_clf.get_top_classes(documentPatternVectors, ascending=True, n=2) Y_GUESS_2[:5] le = preprocessing.LabelEncoder() le.fit(new_clf.classes) Y = le.transform(tweets.emo1.values.tolist()) Y_GUESS_1 = le.transform(Y_GUESS_1) print(sklearn.metrics.classification_report(Y, Y_GUESS_1, target_names = le.classes_)) print('Accuracy:') print(sklearn.metrics.accuracy_score(Y, Y_GUESS_1)) # Average Jacqard similarity of the two guess with the 2 label tweets = tweets.fillna('None') tweets['emotions'] = tweets.apply(lambda t: set((t.emo1 , t.emo2, t.emo3)) - {'None'}, axis=1) nb_tweets = len(tweets.emotions.values) sum([len(tweets.emotions[i] & set(Y_GUESS_2[i])) / len(tweets.emotions[i] | set(Y_GUESS_2[i])) for i in range(nb_tweets)]) / nb_tweets # Guess 2 accuracy sum([tweets.emo1[i] in set(Y_GUESS_2[i]) for i in range(nb_tweets)]) / nb_tweets # Accuracy conting positive if there is an intersection between the two guess and the labels sum([len(tweets.emotions[i] & set(Y_GUESS_2[i])) > 0 for i in range(nb_tweets)]) / nb_tweets
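The averaged Jaccard similarity above is computed with a hand-written set expression; below is a sketch of the same per-tweet intersection-over-union mean using scikit-learn's multilabel utilities. It assumes `tweets.emotions` and `Y_GUESS_2` exist as produced above and is an alternative formulation, not code from the repository.

```
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import jaccard_score

# Binarize the true label sets and the two guesses onto a shared set of columns.
mlb = MultiLabelBinarizer()
mlb.fit(list(tweets.emotions) + [set(g) for g in Y_GUESS_2])
y_true = mlb.transform(tweets.emotions)
y_pred = mlb.transform([set(g) for g in Y_GUESS_2])

# 'samples' averaging is the mean of |intersection| / |union| per tweet.
jaccard_score(y_true, y_pred, average='samples')
```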
0.491456
0.849035
<a href="https://colab.research.google.com/github/StephenTGibson/COMP527-colabNotebooks/blob/main/Numpy%2C_Matplotlib%2C_and_Math_Preliminaries.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Numpy, Matplotlib and Math preliminaries Numpy is a python library that provides data structures useful for data mining such as arrays, and various functions on those data structures. Follow [the official numpy tutorial](https://numpy.org/devdocs/user/quickstart.html) and familiarize yourself with numpy. Using only Numpy as an external library complete the following exercises. ``` import numpy as np ``` ## Excercise 1 Given $\overline{X} = (1,2,3,4,5,6,7,8,9,10)^T$ and $\overline{Y} = (10,9,8,7,6,5,4,3,2,1)^T$ find 1. $\overline{X} + \overline{Y}$ 2. $\overline{X}^T \overline{Y}$ 2. $\overline{X}\overline{Y}^T$ ``` X = np.transpose(np.array([1,2,3,4,5,6,7,8,9,10])) Y = np.transpose(np.array([10,9,8,7,6,5,4,3,2,1])) # 1 X + Y # 2 np.dot(np.transpose(X), Y) # 3 np.outer(X, np.transpose(Y)) ``` --- ## Excercise 2 Given two matrices $\overline{A} = \begin{pmatrix}1 & 2 & 3 & 4 & 5\\ 6 & 7 & 8 & 9 & 10\\ 11 & 12 & 13 & 14 & 15\\ 16 & 17 & 18 & 19 & 20\\ 21 & 22 & 23 & 24 & 25 \end{pmatrix}$ and $\overline{B} = \begin{pmatrix}0 & 1 & 0 & 1 & 0\\ 1 & 2 & 3 & 4 & 5\\ -1 & 0 & 1 & 0 & -1 \\ 5 & 4 & 3 & 2 & 5\\ -1 & 0 & 1 & 0 & -1 \end{pmatrix}$ 1. Compute $\overline{A} + \overline{B}$ 2. Compute $\overline{B} + \overline{A}$. Is it equal to $\overline{A} + \overline{B}$? Is it always the case? 3. Compute $\overline{A} \cdot \overline{B}$ 4. Compute $\overline{B} \cdot \overline{A}$. Is it equal to $\overline{A} \cdot \overline{B}$? the last axis is printed from left to right, the second-to-last is printed from top to bottom, the rest are also printed from top to bottom, with each slice separated from the next by an empty line. ``` A = np.array([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,19,20],[21,22,23,24,25]]) B = np.array([[0,1,0,1,0],[1,2,3,4,5],[-1,0,1,0,-1],[5,4,3,2,5],[-1,0,1,0,-1]]) print(f'A:\n{A}\n\nB:\n{B}') # 1 A + B # 2 B + A # 3 # * does elementwise multiplication only # @ or dot method does matrix multiplication A @ B # 4 B @ A ``` --- ## Excercise 3 Compute the inverse of the following matrix $\overline{A} = \begin{pmatrix} 1 & 2 & 4\\ -2 & 1 & 5 \\1 & 2 & 3 \end{pmatrix}$, if one exsits. Verify that the matrix product of $\overline{A}$ and its inverse is the 3x3 identity matrix. ``` A = np.array([[1,2,4],[-2,1,5],[1,2,3]]) np.linalg.inv(A) A @ np.linalg.inv(A) ``` --- ## Excercise 4 Find the ranks of the following matrices $\overline{A} = \left(\begin{matrix} 1 & 0 & 1 \\ 0 & 1 & 1\\ 0 & 0 & 0\end{matrix}\right)~$ and $~\overline{B} = \left(\begin{matrix} 1 & 2 & 1 \\-2 & -3 & 1\\ 3 & 5 & 0\end{matrix}\right)$. ``` A = np.array([[1,0,1],[0,1,1],[0,0,0]]) B = np.array([[1,2,1],[-2,-3,1],[3,5,0]]) np.linalg.matrix_rank(A) np.linalg.matrix_rank(B) ``` --- ## Excercise 5 Find the eigenvalues of matrix $\overline{A} = \left(\begin{matrix} 4 & 2\\ 1 & 3\end{matrix}\right)$ ``` A = np.array([[4,2],[1,3]]) np.linalg.eig(A) ``` --- ## Excercise 6 For this excercise we will need [Matplotlib](https://matplotlib.org/index.html). Follow [the official Matplotlib tutorial](https://matplotlib.org/tutorials/introductory/pyplot.html#) and familiarize yourself with Matplotlib. Recall from the lecture the Gradient Descent method for finding local minimum of a function: 1. Pick an initial point $\overline{X}_0$ 2. 
Iterate according to $\overline{X}_{i+1} = \overline{X}_i - \gamma \cdot \big((\nabla_{\overline{X}} f)(\overline{X}_i) \big)$ Examine this method by trying to find the minimum of the function $f(x) = (x-3)^2$. More specifically, for every $\gamma \in \{0.01, 0.1, 0.9, 1, 2\}$: 1. Plot the graph of $f(x) = (x-3)^2$ 2. Pick an intial point $x = -4$ 3. Run 20 interations of the methods 4. In every iteration $i = 1, \ldots, 20$, plot the point $(x_i, f(x_i))$ on the same plot as the graph of the function $f(x)$ Interpret the results. ``` import matplotlib.pyplot as plt # 1 Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 plt.plot(Xrange,fx) def calc_fx(x): return (x-3)**2 def calc_xiplus1(xi, gamma): return xi - gamma*2*(xi-3) xValue = -4 gamma = 0.01 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 0.1 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) print(xArray) xValue = -4 gamma = 0.9 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 1 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 2 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) ```
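For the interpretation step, the behaviour of each $\gamma$ can be read off the update rule itself. With $f(x) = (x-3)^2$ we have $(\nabla_x f)(x) = 2(x-3)$, so

$$x_{i+1} - 3 = x_i - 2\gamma(x_i - 3) - 3 = (1 - 2\gamma)\,(x_i - 3),$$

i.e. every iteration multiplies the distance to the minimum at $x=3$ by the constant factor $1-2\gamma$. The method therefore converges exactly when $|1-2\gamma| < 1$, that is $0 < \gamma < 1$: $\gamma=0.01$ converges slowly, $\gamma=0.1$ quickly, $\gamma=0.9$ converges while jumping from side to side of the minimum, $\gamma=1$ oscillates forever between $x=-4$ and $x=10$, and $\gamma=2$ diverges because the distance triples in magnitude at every step. The plots produced above should match this behaviour.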
github_jupyter
import numpy as np X = np.transpose(np.array([1,2,3,4,5,6,7,8,9,10])) Y = np.transpose(np.array([10,9,8,7,6,5,4,3,2,1])) # 1 X + Y # 2 np.dot(np.transpose(X), Y) # 3 np.outer(X, np.transpose(Y)) A = np.array([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,19,20],[21,22,23,24,25]]) B = np.array([[0,1,0,1,0],[1,2,3,4,5],[-1,0,1,0,-1],[5,4,3,2,5],[-1,0,1,0,-1]]) print(f'A:\n{A}\n\nB:\n{B}') # 1 A + B # 2 B + A # 3 # * does elementwise multiplication only # @ or dot method does matrix multiplication A @ B # 4 B @ A A = np.array([[1,2,4],[-2,1,5],[1,2,3]]) np.linalg.inv(A) A @ np.linalg.inv(A) A = np.array([[1,0,1],[0,1,1],[0,0,0]]) B = np.array([[1,2,1],[-2,-3,1],[3,5,0]]) np.linalg.matrix_rank(A) np.linalg.matrix_rank(B) A = np.array([[4,2],[1,3]]) np.linalg.eig(A) import matplotlib.pyplot as plt # 1 Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 plt.plot(Xrange,fx) def calc_fx(x): return (x-3)**2 def calc_xiplus1(xi, gamma): return xi - gamma*2*(xi-3) xValue = -4 gamma = 0.01 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 0.1 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) print(xArray) xValue = -4 gamma = 0.9 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 1 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray) xValue = -4 gamma = 2 xArray = np.array([xValue]) for i in range(1, 20): xArray = np.append(xArray, calc_xiplus1(xArray[-1], gamma)) fxArray = np.array(calc_fx(xArray)) Xrange = np.arange(-7,12,1) fx = (Xrange-3)**2 fig, ax = plt.subplots() ax.plot(Xrange,fx) ax.plot(xArray, fxArray)
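The five gradient-descent cells above differ only in the value of `gamma`; here is a self-contained sketch of the same experiment written as one loop (same start point, same 20 points, one figure per learning rate):

```
import numpy as np
import matplotlib.pyplot as plt

Xrange = np.arange(-7, 12, 1)
for gamma in (0.01, 0.1, 0.9, 1, 2):
    xArray = np.array([-4.0])
    for i in range(1, 20):
        # x_{i+1} = x_i - gamma * f'(x_i) with f'(x) = 2(x - 3)
        xArray = np.append(xArray, xArray[-1] - gamma * 2 * (xArray[-1] - 3))
    fig, ax = plt.subplots()
    ax.plot(Xrange, (Xrange - 3) ** 2)
    ax.plot(xArray, (xArray - 3) ** 2)
    ax.set_title('gamma = {}'.format(gamma))
```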
0.356559
0.987841
<h1> 주요감사업무 수행내용 지도학습 </h1> <h3> 가. 전처리 </h3> #### 1. 엑셀 전처리 1. 외부감사 실시내용에서 공시되는 주요감사업무 수행내용 Text 입수 (2017년 1월말 - 2019년 12월말 법인 공시자료) → 352,002건 2. 엑셀 전처리 : ① 전반감사계획(감사착수단계) 제외 ② 공백(NA) 또는 의미없는 부호 제거 ③ 중복 제거 → 17,540건 #### 2. Python 전처리 전처리에 사용되는 모듈을 가져온다 엑셀에서 1차적으로 처리가 되어있어서, 정규식으로 특수문자를 제거하고, 띄어쓰기를 무시하기 위하여 공백을 제거하는 처리만 수행한다. <br> * 띄어쓰기를 무시하는 이유는, ① 본 분석에서 사용하는 데이터에는 일반적으로 사용되는 지도 방식의 형태소 분석기(ex. KoNLPy)의 퍼포먼스가 좋지 않았고, ② 입수 텍스트의 띄어쓰기 품질이 좋지 않았기 때문이다. 본 분석에서는 비지도방식의 형태소 분석기인 soynlp의 MaxScoreTokenizer를 사용한다. <p>[참고]</p> 1. <a ref="https://github.com/lovit/soynlp">soynlp github</a> <br> 2. https://wikidocs.net/92961 ``` import pandas as pd import re ``` 엑셀로 전처리가 수행된 입수 데이터를 Dataframe으로 가져온다 ``` df = pd.read_excel("data.xlsx") df.head() ``` 전처리 로직으로 특수 문자/공백/불용어를 제거한다. 형태소분석기에서 불용어를 적절히 분리해내지 못하므로, 선택적으로 제거한다. <p>[참고]</p> 1. https://somjang.tistory.com/<br> 2. https://wikidocs.net/22530 <br> 3. https://stackoverflow.com/questions/3411771/best-way-to-replace-multiple-characters-in-a-string ``` data = [] for i in list(df["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', str(i)) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df["DATA"] = data df.head() ``` <h3> 나. Export Unsupervised dataset </h3> 엑셀 전처리 과정에서 입수된 데이터는 정렬되어있다. 이를 임의로 섞어서 이후 추출할 Train, Test 자료의 대표성을 높여준다. <p>[참고]</p> 1. https://stackoverflow.com/questions/29576430/shuffle-dataframe-rows ``` df = df.sample(frac=1).reset_index(drop=True) df.head() ``` 각각의 자료에서 다시 임의로 600개씩 선택하여 Train과 Test 데이터로 만든다. (최초 생성 데이터 계속 활용) ``` train, test = df.sample(n=600), df.sample(n=600) ``` Train과 Test Dataset을 Excel로 추출한다 ``` train.to_excel("train.xlsx") test.to_excel("test.xlsx") ``` <h3>다. 데이터에 전문가적 판단 반영 (엑셀에서 수행)</h3> <p>지도학습을 수행하기 위하여 추출된 1,200개의 샘플에 전문가적 판단에 따라 아래의 label을 붙인다. (분반기 검토 업무의 경우 분반기 시간이 따로 공시되는 점을 고려하여 따로 Label을 붙인다.)</p> <ul> <li>1: 위험평가/계획</li> <li>2: 입증감사절차/의견형성</li> <li>3: 분반기검토</li> </ul> <p>-예외 처리 규칙-</p> <ol> <li>두 가지 단계가 섞여있다고 판단되는 경우 감사의 경우 입증감사절차, 검토의 경우 분반기검토로 분류한다.</li> <li>기초잔액감사, 기중 하드클로징 등의 경우 기중 수행업무임에도 입증감사절차로 분류한다.</li> <li>전기 재무제표 검토의 경우 기초잔액감사와 달리 위험평가로 분류한다.</li> <li>분석적 절차/거래 검토의 경우 입증 절차로 분류한다.</li> <li>해외 자회사 방문 등 기중 실사의 경우 입증 목적이 아닌 경우 위험평가/계획으로 분류한다.</li> </ol> <h3> 라. 형태소 분석기 교육 </h3> 위에 언급한 바, 본 분석의 경우 분석대상 형태소가 사전에 주어진 KoNLPy의 분석툴 사용이 적절하지 않다. 많은 회계감사의 용어가 해당 툴의 사전에 반영되어 있지 않아, 부적절한 분석결과가 산출될 수 있다. 아래의 예로 이 상황을 확인할 수 있다. ``` from konlpy.tag import Okt okt = Okt() print(okt.morphs("중요 계정잔액 및 공시내용에 대해 질문 분석적절차 표본점검 문서검증 재계산 등")) ``` 따라서 이번 분석에서는 비지도학습 기반 형태소 분석기인 soynlp를 사용하는데, 전체 데이터를 이용하여 응집확률을 계산하고, 이를 활용하여 형태소를 분석하고 일부 응집 점수를 튜닝하여 TF-IDF 적용전 Vectorize된 Data를 만든다. ``` from soynlp import DoublespaceLineCorpus from soynlp.word import WordExtractor from soynlp.tokenizer import MaxScoreTokenizer word_extractor = WordExtractor() word_extractor.train(df["DATA"]) word_score_table = word_extractor.extract() scores = {word:score.cohesion_forward for word, score in word_score_table.items()} tokenizer = MaxScoreTokenizer(scores=scores) ``` 아래는 분석기 적용내용을 확인후, 필요한 형태소에 가중치 1을 부여하는 score 튜닝을 적용한 내용이다. 
``` tuning = {'중요계정잔액':1, '공시내용':1, '분석적절차':1, '표본점검':1, '문서검증':1, '재계산':1 ,'중요거래유형':1, '내부통제':1, '분석적검토':1, '중간감사':1, '조회서':1, '감사계약':1 ,'입증감사':1, '기말감사':1, '재고자산':1, '실사입회':1, '내부회계관리제도':1, '연결재무제표':1 ,'연결조정분개':1, '계획':1, '이해':1, '투자자본상계':1, '일반감사절차':1, '감사결과':1 ,'반기검토':1, '분기검토':1, '후속기간':1, '통제테스트':1, '잔액검증':1, '입증감사':1 ,'중요한왜곡표시':1, '식별':1, '기초잔액':1, '실증절차':1, '표본테스트':1, '현장감사':1 ,'별도재무제표':1, '경영자주장':1, '설계':1, '운영':1, '주석사항':1, '효과성':1 ,'회계처리':1, '재무제표':1, '전산감사':1 ,'사전검토':1, '계정별':1, '감사보고서':1, '종속기업':1, '수행':1, '금융자산':1 ,'이해및평가':1, '내부통제제도':1, '중요공시절차':1, '테스트':1, 'test':1, '실재성':1 ,'감사절차':1, '주석':1, '성격범위시기':1, '내부회계':1, '피투자회사':1, '지배회사':1 ,'분반기검토':1, '프로세스':1, '거래유형':1, '통제위험':1, '외부조회':1, '경영자':1 ,'계정과목':1, '중요계정':1, '종속회사':1, '내부통제테스트':1, '입증절차':1, '보고서':1 ,'변동사항':1, '업무수행':1, '전기조서':1, '감사위험':1, '예비적':1, '지배기구':1 ,'입증시사':1, '1분기':1, '2분기':1, '3분기':1, '일반기업회계기준':1, '조기입증':1 ,'부문감사인':1, '그룹감사':1, '중요성':1, '초도감사':1, '의견형성':1, '자회사':1} scores.update(tuning) ``` 아래에서 튜닝되지 않은 OKT와 튜닝된 soynlp 형태소 분석기의 성능 차이를 확인할 수 있다. (사실 이렇게 되면 훈련된 것으로 볼 수 있어서, okt의 사전 추가 등으로 같은 결과를 얻을 수 있을것 같다.) ``` print("soynlp(tuned): {0}".format(tokenizer.tokenize('중요계정잔액및공시내용에대해질문분석적절차표본점검문서검증재계산등)'))) print("okt(not-tuned): {0}".format(okt.morphs("중요 계정잔액 및 공시내용에 대해 질문 분석적절차 표본점검 문서검증 재계산 등"))) ``` <h3>마. 형태소 분석기 TF-IDF 로직 반영</h3> (Narrative) ``` from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vect = TfidfVectorizer(tokenizer=tokenizer) tfidf_vect.fit(df["DATA"]) ``` <h3>바. Supervised Machine Learning - Random Forest</h3> ``` df_train = pd.read_excel("train_supervised.xlsx") data = [] for i in list(df_train["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_train["DATA"] = data tfidf_matrix_train = tfidf_vect.transform(df_train["DATA"]) y_train = df_train["LABEL"] from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score dt_clf_rfc = RandomForestClassifier(random_state=156) parameters_rfc = {'max_depth':[30, 40, 50], 'n_estimators':[40, 50, 60]} grid_dtree_rfc = GridSearchCV(dt_clf_rfc, param_grid=parameters_rfc, cv=20, refit=True) grid_dtree_rfc.fit(tfidf_matrix_train, y_train) estimator_rfc = grid_dtree_rfc.best_params_ print(estimator_rfc) pred_rfc = grid_dtree_rfc.predict(tfidf_matrix_train) accuracy_rfc = accuracy_score(y_train, pred_rfc) print(accuracy_rfc) df_train["pred"] = pred_rfc df_train.to_excel("train_pred.xlsx") ``` 다음 단계로 넘어가기 전에 예측 데이터를 추출하여 지도 단계의 오류를 train_supervised 파일에 반영하여 모델을 다시 생성하는 절차를 반복한다. 
``` df_test = pd.read_excel("test_supervised.xlsx") data = [] for i in list(df_test["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_test["DATA"] = data tfidf_matrix_test = tfidf_vect.transform(df_test["DATA"]) y_test = df_test["LABEL"] pred = grid_dtree_rfc.predict(tfidf_matrix_test) accuracy = accuracy_score(y_test, pred) print(accuracy) df_test["pred"] = pred df_test df_test.to_excel("test_pred.xlsx") ``` 600개 학습모형의 TEST 결과 Accuracy score가 89.5%로 개선이 필요하였다. 분류 결과는 아래 표에서 확인할 수 있다. 이에 따라, 원자료에서 Train 데이터에 임의선택한 3번 레이블 300개와 1번 레이블 100개를 추가하여 학습모형을 개선하였다. <table style="table-layout: left;"> <tr> <th>Supervised</th> <th>Prediction</th> <th>Count</th> <th>Percentage</th> </tr> <tr> <td>1</td> <td>1</td> <td>215</td> <td>91</td> </tr> <tr> <td>1</td> <td>2</td> <td>21</td> <td>9</td> </tr> <tr> <td>1</td> <td>3</td> <td>1</td> <td>0</td> </tr> <tr> <td></td> <td>소계</td> <td>237</td> <td>100</td> </tr> <tr> <td>2</td> <td>1</td> <td>16</td> <td>5</td> </tr> <tr> <td>2</td> <td>2</td> <td>312</td> <td>95</td> </tr> <tr> <td>2</td> <td>3</td> <td>1</td> <td>0</td> </tr> <tr> <td></td> <td>소계</td> <td>329</td> <td>100</td> </tr> <tr> <td>3</td> <td>1</td> <td>9</td> <td>26</td> </tr> <tr> <td>3</td> <td>2</td> <td>14</td> <td>41</td> </tr> <tr> <td>3</td> <td>3</td> <td>11</td> <td>32</td> </tr> <tr> <td></td> <td>소계</td> <td>34</td> <td>100</td> </tr> </table> 데이터를 추가한 학습 DATA로 모형을 다시 학습한다. ``` df_train = pd.read_excel("train_supervised_second.xlsx") data = [] for i in list(df_train["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_train["DATA"] = data tfidf_matrix_train = tfidf_vect.transform(df_train["DATA"]) y_train = df_train["LABEL"] from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score dt_clf_rfc = RandomForestClassifier(random_state=156) parameters_rfc = {'max_depth':[60, 70, 80], 'n_estimators':[50, 60, 70]} grid_dtree_rfc = GridSearchCV(dt_clf_rfc, param_grid=parameters_rfc, cv=20, refit=True) grid_dtree_rfc.fit(tfidf_matrix_train, y_train) estimator_rfc = grid_dtree_rfc.best_params_ print(estimator_rfc) pred_rfc = grid_dtree_rfc.predict(tfidf_matrix_train) accuracy_rfc = accuracy_score(y_train, pred_rfc) print(accuracy_rfc) df_train["pred"] = pred_rfc df_train.to_excel("train_second_pred.xlsx") ``` Test data에 다시 확인한다. 약간 개선되었다. (0.9 → 0.92) ``` pred = grid_dtree_rfc.predict(tfidf_matrix_test) accuracy = accuracy_score(y_test, pred) print(accuracy) df_test["pred_second"] = pred df_test.to_excel("test_pred.xlsx") ``` <h3>사. 
Unlabelled Data 분류 작업</h3> ``` df_pred = pd.read_excel("predict.xlsx") df_pred.head() data = [] for i in list(df_pred["ACTIVITY"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', str(i)) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_pred["ACTIVITY"] = data tfidf_matrix_pred = tfidf_vect.transform(df_pred["ACTIVITY"]) pred = grid_dtree_rfc.predict(tfidf_matrix_pred) df_pred["LABEL"] = pred df_pred.head() df_pred.to_excel("predict_labelled.xlsx") ```
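The clean-up block above (regular-expression punctuation removal, whitespace stripping, and the stop-word substitutions) is pasted verbatim into each of the loading cells. Below is a minimal sketch of factoring it into a single function, reusing exactly the regex, placeholder tokens, and stop-word list from the notebook; the function name `preprocess_texts` is ours.

```
import re

STOPWORDS = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"]
# Words containing stop-word substrings are shielded with placeholder tokens first,
# exactly as in the cells above, and restored afterwards.
PROTECTED = {"의견": "$$", "과목": "%%", "효과성": "##", "결과": "@@"}

def preprocess_texts(texts):
    cleaned = []
    for raw in texts:
        t = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', str(raw))
        t = ''.join(t.split())                      # ignore all spacing
        for word, token in PROTECTED.items():       # shield
            t = t.replace(word, token)
        for word in STOPWORDS:                      # drop stop-words
            t = t.replace(word, " ")
        for word, token in PROTECTED.items():       # restore
            t = t.replace(token, word)
        cleaned.append(t)
    return cleaned

# e.g. df["DATA"] = preprocess_texts(df["DATA"])
```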
github_jupyter
import pandas as pd import re df = pd.read_excel("data.xlsx") df.head() data = [] for i in list(df["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', str(i)) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df["DATA"] = data df.head() df = df.sample(frac=1).reset_index(drop=True) df.head() train, test = df.sample(n=600), df.sample(n=600) train.to_excel("train.xlsx") test.to_excel("test.xlsx") from konlpy.tag import Okt okt = Okt() print(okt.morphs("중요 계정잔액 및 공시내용에 대해 질문 분석적절차 표본점검 문서검증 재계산 등")) from soynlp import DoublespaceLineCorpus from soynlp.word import WordExtractor from soynlp.tokenizer import MaxScoreTokenizer word_extractor = WordExtractor() word_extractor.train(df["DATA"]) word_score_table = word_extractor.extract() scores = {word:score.cohesion_forward for word, score in word_score_table.items()} tokenizer = MaxScoreTokenizer(scores=scores) tuning = {'중요계정잔액':1, '공시내용':1, '분석적절차':1, '표본점검':1, '문서검증':1, '재계산':1 ,'중요거래유형':1, '내부통제':1, '분석적검토':1, '중간감사':1, '조회서':1, '감사계약':1 ,'입증감사':1, '기말감사':1, '재고자산':1, '실사입회':1, '내부회계관리제도':1, '연결재무제표':1 ,'연결조정분개':1, '계획':1, '이해':1, '투자자본상계':1, '일반감사절차':1, '감사결과':1 ,'반기검토':1, '분기검토':1, '후속기간':1, '통제테스트':1, '잔액검증':1, '입증감사':1 ,'중요한왜곡표시':1, '식별':1, '기초잔액':1, '실증절차':1, '표본테스트':1, '현장감사':1 ,'별도재무제표':1, '경영자주장':1, '설계':1, '운영':1, '주석사항':1, '효과성':1 ,'회계처리':1, '재무제표':1, '전산감사':1 ,'사전검토':1, '계정별':1, '감사보고서':1, '종속기업':1, '수행':1, '금융자산':1 ,'이해및평가':1, '내부통제제도':1, '중요공시절차':1, '테스트':1, 'test':1, '실재성':1 ,'감사절차':1, '주석':1, '성격범위시기':1, '내부회계':1, '피투자회사':1, '지배회사':1 ,'분반기검토':1, '프로세스':1, '거래유형':1, '통제위험':1, '외부조회':1, '경영자':1 ,'계정과목':1, '중요계정':1, '종속회사':1, '내부통제테스트':1, '입증절차':1, '보고서':1 ,'변동사항':1, '업무수행':1, '전기조서':1, '감사위험':1, '예비적':1, '지배기구':1 ,'입증시사':1, '1분기':1, '2분기':1, '3분기':1, '일반기업회계기준':1, '조기입증':1 ,'부문감사인':1, '그룹감사':1, '중요성':1, '초도감사':1, '의견형성':1, '자회사':1} scores.update(tuning) print("soynlp(tuned): {0}".format(tokenizer.tokenize('중요계정잔액및공시내용에대해질문분석적절차표본점검문서검증재계산등)'))) print("okt(not-tuned): {0}".format(okt.morphs("중요 계정잔액 및 공시내용에 대해 질문 분석적절차 표본점검 문서검증 재계산 등"))) from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vect = TfidfVectorizer(tokenizer=tokenizer) tfidf_vect.fit(df["DATA"]) df_train = pd.read_excel("train_supervised.xlsx") data = [] for i in list(df_train["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_train["DATA"] = data tfidf_matrix_train = tfidf_vect.transform(df_train["DATA"]) y_train = df_train["LABEL"] from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score dt_clf_rfc = RandomForestClassifier(random_state=156) parameters_rfc = {'max_depth':[30, 40, 50], 'n_estimators':[40, 50, 60]} grid_dtree_rfc = GridSearchCV(dt_clf_rfc, param_grid=parameters_rfc, cv=20, refit=True) 
grid_dtree_rfc.fit(tfidf_matrix_train, y_train) estimator_rfc = grid_dtree_rfc.best_params_ print(estimator_rfc) pred_rfc = grid_dtree_rfc.predict(tfidf_matrix_train) accuracy_rfc = accuracy_score(y_train, pred_rfc) print(accuracy_rfc) df_train["pred"] = pred_rfc df_train.to_excel("train_pred.xlsx") df_test = pd.read_excel("test_supervised.xlsx") data = [] for i in list(df_test["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_test["DATA"] = data tfidf_matrix_test = tfidf_vect.transform(df_test["DATA"]) y_test = df_test["LABEL"] pred = grid_dtree_rfc.predict(tfidf_matrix_test) accuracy = accuracy_score(y_test, pred) print(accuracy) df_test["pred"] = pred df_test df_test.to_excel("test_pred.xlsx") df_train = pd.read_excel("train_supervised_second.xlsx") data = [] for i in list(df_train["DATA"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', i) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_train["DATA"] = data tfidf_matrix_train = tfidf_vect.transform(df_train["DATA"]) y_train = df_train["LABEL"] from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score dt_clf_rfc = RandomForestClassifier(random_state=156) parameters_rfc = {'max_depth':[60, 70, 80], 'n_estimators':[50, 60, 70]} grid_dtree_rfc = GridSearchCV(dt_clf_rfc, param_grid=parameters_rfc, cv=20, refit=True) grid_dtree_rfc.fit(tfidf_matrix_train, y_train) estimator_rfc = grid_dtree_rfc.best_params_ print(estimator_rfc) pred_rfc = grid_dtree_rfc.predict(tfidf_matrix_train) accuracy_rfc = accuracy_score(y_train, pred_rfc) print(accuracy_rfc) df_train["pred"] = pred_rfc df_train.to_excel("train_second_pred.xlsx") pred = grid_dtree_rfc.predict(tfidf_matrix_test) accuracy = accuracy_score(y_test, pred) print(accuracy) df_test["pred_second"] = pred df_test.to_excel("test_pred.xlsx") df_pred = pd.read_excel("predict.xlsx") df_pred.head() data = [] for i in list(df_pred["ACTIVITY"]): temp = re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》]', '', str(i)) temp = ''.join(temp.split()) temp = temp.replace("의견","$$").replace("과목","%%").replace("효과성","##").replace("결과","@@") stopword = ["유의적", "및", "의", "등", "에대한", "에대해", "에대하여", "와", "과", "를", "을"] for word in stopword: temp = temp.replace(word," ") temp = temp.replace("$$", "의견").replace("%%", "과목").replace("##", "효과성").replace("@@","결과") data.append(temp) df_pred["ACTIVITY"] = data tfidf_matrix_pred = tfidf_vect.transform(df_pred["ACTIVITY"]) pred = grid_dtree_rfc.predict(tfidf_matrix_pred) df_pred["LABEL"] = pred df_pred.head() df_pred.to_excel("predict_labelled.xlsx")
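The supervised-vs-predicted breakdown reported in the table further up can be produced directly from the prediction frame; a short sketch, assuming `df_test` still holds the `LABEL` and `pred` columns created above:

```
import pandas as pd

# Rows: supervised label, columns: predicted label, cells: counts.
pd.crosstab(df_test["LABEL"], df_test["pred"], margins=True)

# The same table as row percentages, matching the Percentage column of the summary.
(pd.crosstab(df_test["LABEL"], df_test["pred"], normalize="index") * 100).round(1)
```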
0.165391
0.928344
# Example of loading a custom tree model into SHAP This notebook shows how to pass a custom tree ensemble model into SHAP for explanation. ``` import numpy as np import shap import sklearn import graphviz ``` ## Simple regression tree model Here we define a simple regression tree and then load it into SHAP as a custom model. ``` X,y = shap.datasets.boston() orig_model = sklearn.tree.DecisionTreeRegressor(max_depth=2) orig_model.fit(X, y) dot_data = sklearn.tree.export_graphviz(orig_model, out_file=None, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # extract the arrays that define the tree children_left = orig_model.tree_.children_left children_right = orig_model.tree_.children_right children_default = children_right.copy() # because sklearn does not use missing values features = orig_model.tree_.feature thresholds = orig_model.tree_.threshold values = orig_model.tree_.value.reshape(orig_model.tree_.value.shape[0], 1) node_sample_weight = orig_model.tree_.weighted_n_node_samples print(" children_left", children_left) # note that negative children values mean this is a leaf node print(" children_right", children_right) print(" children_default", children_default) print(" features", features) print(" thresholds", thresholds.round(3)) print(" values", values.round(3)) print("node_sample_weight", node_sample_weight) # define a custom tree model tree_dict = { "children_left": children_left, "children_right": children_right, "children_default": children_default, "features": features, "thresholds": thresholds, "values": values, "node_sample_weight": node_sample_weight } model = { "trees": [tree_dict] } explainer = shap.TreeExplainer(model) # Make sure that the ingested SHAP model (a TreeEnsemble object) makes the # same predictions as the original model assert np.abs(explainer.model.predict(X) - orig_model.predict(X)).max() < 1e-4 # make sure the SHAP values sum up to the model output (this is the local accuracy property) assert np.abs(explainer.expected_value + explainer.shap_values(X).sum(1) - orig_model.predict(X)).max() < 1e-4 ``` ## Simple GBM classification model (with 2 trees) Here we define a simple regression tree and then load it into SHAP as a custom model. 
``` X2,y2 = shap.datasets.adult() orig_model2 = sklearn.ensemble.GradientBoostingClassifier(n_estimators=2) orig_model2.fit(X2, y2) ``` ### Pull the info of the first tree ``` tree_tmp = orig_model2.estimators_[0][0].tree_ # extract the arrays that define the tree children_left1 = tree_tmp.children_left children_right1 = tree_tmp.children_right children_default1 = children_right1.copy() # because sklearn does not use missing values features1 = tree_tmp.feature thresholds1 = tree_tmp.threshold values1 = tree_tmp.value.reshape(tree_tmp.value.shape[0], 1) node_sample_weight1 = tree_tmp.weighted_n_node_samples print(" children_left1", children_left1) # note that negative children values mean this is a leaf node print(" children_right1", children_right1) print(" children_default1", children_default1) print(" features1", features1) print(" thresholds1", thresholds1.round(3)) print(" values1", values1.round(3)) print("node_sample_weight1", node_sample_weight1) ``` ### Pull the info of the second tree ``` tree_tmp = orig_model2.estimators_[1][0].tree_ # extract the arrays that define the tree children_left2 = tree_tmp.children_left children_right2 = tree_tmp.children_right children_default2 = children_right2.copy() # because sklearn does not use missing values features2 = tree_tmp.feature thresholds2 = tree_tmp.threshold values2 = tree_tmp.value.reshape(tree_tmp.value.shape[0], 1) node_sample_weight2 = tree_tmp.weighted_n_node_samples print(" children_left2", children_left2) # note that negative children values mean this is a leaf node print(" children_right2", children_right2) print(" children_default2", children_default2) print(" features2", features2) print(" thresholds2", thresholds2.round(3)) print(" values2", values2.round(3)) print("node_sample_weight2", node_sample_weight2) ``` ### Create a list of SHAP Trees ``` # define a custom tree model tree_dicts = [ { "children_left": children_left1, "children_right": children_right1, "children_default": children_default1, "features": features1, "thresholds": thresholds1, "values": values1 * orig_model2.learning_rate, "node_sample_weight": node_sample_weight1 }, { "children_left": children_left2, "children_right": children_right2, "children_default": children_default2, "features": features2, "thresholds": thresholds2, "values": values2 * orig_model2.learning_rate, "node_sample_weight": node_sample_weight2 }, ] model2 = { "trees": tree_dicts, "base_offset": orig_model2.init_.prior, "tree_output": "log_odds", "objective": "binary_crossentropy", "input_dtype": np.float32, # this is what type the model uses the input feature data "internal_dtype": np.float64 # this is what type the model uses for values and thresholds } ``` ### Explain the custom model ``` # build a background dataset for us to use based on people near a 0.95 cutoff vs = np.abs(orig_model2.predict_proba(X2)[:,1] - 0.95) inds = np.argsort(vs) inds = inds[:200] # build an explainer that explains the probability output of the model explainer2 = shap.TreeExplainer(model2, X2.iloc[inds,:], feature_dependence="independent", model_output="probability") # Make sure that the ingested SHAP model (a TreeEnsemble object) makes the # same predictions as the original model assert np.abs(explainer2.model.predict(X2, output="probability") - orig_model2.predict_proba(X2)[:,1]).max() < 1e-4 # make sure the sum of the SHAP values equals the model output shap_sum = explainer2.expected_value + explainer2.shap_values(X2.iloc[:,:]).sum(1) assert np.abs(shap_sum - orig_model2.predict_proba(X2)[:,1]).max() < 
1e-4 ```
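The two extraction cells above pull the same seven arrays out of each sklearn tree by hand. The helper below is a sketch of that extraction as one function; the name `sklearn_tree_to_dict` and the `scaling` argument are ours, not part of the SHAP API, but the returned fields are exactly the ones used in `tree_dicts` above.

```
def sklearn_tree_to_dict(decision_tree, scaling=1.0):
    # Convert a fitted sklearn tree into the dict format accepted by shap.TreeExplainer.
    tree = decision_tree.tree_
    return {
        "children_left": tree.children_left,
        "children_right": tree.children_right,
        # sklearn trees have no missing-value branch, so default to the right child
        "children_default": tree.children_right.copy(),
        "features": tree.feature,
        "thresholds": tree.threshold,
        "values": tree.value.reshape(tree.value.shape[0], 1) * scaling,
        "node_sample_weight": tree.weighted_n_node_samples,
    }

# Rebuilding the two-tree dictionary from the GBM above:
# tree_dicts = [sklearn_tree_to_dict(est[0], orig_model2.learning_rate)
#               for est in orig_model2.estimators_]
```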
github_jupyter
import numpy as np import shap import sklearn import graphviz X,y = shap.datasets.boston() orig_model = sklearn.tree.DecisionTreeRegressor(max_depth=2) orig_model.fit(X, y) dot_data = sklearn.tree.export_graphviz(orig_model, out_file=None, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph # extract the arrays that define the tree children_left = orig_model.tree_.children_left children_right = orig_model.tree_.children_right children_default = children_right.copy() # because sklearn does not use missing values features = orig_model.tree_.feature thresholds = orig_model.tree_.threshold values = orig_model.tree_.value.reshape(orig_model.tree_.value.shape[0], 1) node_sample_weight = orig_model.tree_.weighted_n_node_samples print(" children_left", children_left) # note that negative children values mean this is a leaf node print(" children_right", children_right) print(" children_default", children_default) print(" features", features) print(" thresholds", thresholds.round(3)) print(" values", values.round(3)) print("node_sample_weight", node_sample_weight) # define a custom tree model tree_dict = { "children_left": children_left, "children_right": children_right, "children_default": children_default, "features": features, "thresholds": thresholds, "values": values, "node_sample_weight": node_sample_weight } model = { "trees": [tree_dict] } explainer = shap.TreeExplainer(model) # Make sure that the ingested SHAP model (a TreeEnsemble object) makes the # same predictions as the original model assert np.abs(explainer.model.predict(X) - orig_model.predict(X)).max() < 1e-4 # make sure the SHAP values sum up to the model output (this is the local accuracy property) assert np.abs(explainer.expected_value + explainer.shap_values(X).sum(1) - orig_model.predict(X)).max() < 1e-4 X2,y2 = shap.datasets.adult() orig_model2 = sklearn.ensemble.GradientBoostingClassifier(n_estimators=2) orig_model2.fit(X2, y2) tree_tmp = orig_model2.estimators_[0][0].tree_ # extract the arrays that define the tree children_left1 = tree_tmp.children_left children_right1 = tree_tmp.children_right children_default1 = children_right1.copy() # because sklearn does not use missing values features1 = tree_tmp.feature thresholds1 = tree_tmp.threshold values1 = tree_tmp.value.reshape(tree_tmp.value.shape[0], 1) node_sample_weight1 = tree_tmp.weighted_n_node_samples print(" children_left1", children_left1) # note that negative children values mean this is a leaf node print(" children_right1", children_right1) print(" children_default1", children_default1) print(" features1", features1) print(" thresholds1", thresholds1.round(3)) print(" values1", values1.round(3)) print("node_sample_weight1", node_sample_weight1) tree_tmp = orig_model2.estimators_[1][0].tree_ # extract the arrays that define the tree children_left2 = tree_tmp.children_left children_right2 = tree_tmp.children_right children_default2 = children_right2.copy() # because sklearn does not use missing values features2 = tree_tmp.feature thresholds2 = tree_tmp.threshold values2 = tree_tmp.value.reshape(tree_tmp.value.shape[0], 1) node_sample_weight2 = tree_tmp.weighted_n_node_samples print(" children_left2", children_left2) # note that negative children values mean this is a leaf node print(" children_right2", children_right2) print(" children_default2", children_default2) print(" features2", features2) print(" thresholds2", thresholds2.round(3)) print(" values2", values2.round(3)) print("node_sample_weight2", node_sample_weight2) # 
define a custom tree model tree_dicts = [ { "children_left": children_left1, "children_right": children_right1, "children_default": children_default1, "features": features1, "thresholds": thresholds1, "values": values1 * orig_model2.learning_rate, "node_sample_weight": node_sample_weight1 }, { "children_left": children_left2, "children_right": children_right2, "children_default": children_default2, "features": features2, "thresholds": thresholds2, "values": values2 * orig_model2.learning_rate, "node_sample_weight": node_sample_weight2 }, ] model2 = { "trees": tree_dicts, "base_offset": orig_model2.init_.prior, "tree_output": "log_odds", "objective": "binary_crossentropy", "input_dtype": np.float32, # this is what type the model uses the input feature data "internal_dtype": np.float64 # this is what type the model uses for values and thresholds } # build a background dataset for us to use based on people near a 0.95 cutoff vs = np.abs(orig_model2.predict_proba(X2)[:,1] - 0.95) inds = np.argsort(vs) inds = inds[:200] # build an explainer that explains the probability output of the model explainer2 = shap.TreeExplainer(model2, X2.iloc[inds,:], feature_dependence="independent", model_output="probability") # Make sure that the ingested SHAP model (a TreeEnsemble object) makes the # same predictions as the original model assert np.abs(explainer2.model.predict(X2, output="probability") - orig_model2.predict_proba(X2)[:,1]).max() < 1e-4 # make sure the sum of the SHAP values equals the model output shap_sum = explainer2.expected_value + explainer2.shap_values(X2.iloc[:,:]).sum(1) assert np.abs(shap_sum - orig_model2.predict_proba(X2)[:,1]).max() < 1e-4
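The closing assertions check SHAP's local accuracy property: for every row $x$, the attributions must reconstruct the model output,

$$f(x) = \phi_0 + \sum_{i=1}^{M} \phi_i(x),$$

where $\phi_0$ is `explainer.expected_value`, $\phi_i(x)$ are the per-feature SHAP values, and $M$ is the number of features; the `1e-4` tolerance only absorbs floating-point error.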
0.663996
0.972571
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from datetime import datetime import time from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix, average_precision_score, f1_score, roc_curve, auc from sklearn.metrics import precision_recall_curve import seaborn as sns; sns.set() sns.set_style('whitegrid') sns.set_palette('Set2') %matplotlib inline ``` # Define Helper Functions ``` def probability_to_label(probabilities, threshold=0.5): probabilities = list(probabilities) th = threshold predictions = [1 if i > th else 0 for i in probabilities] return predictions def plot_cm(y_true, y_pred, title='Confusion Matrix', cmap=plt.cm.Blues): sns.set_style('white') cm = confusion_matrix(y_test, y_pred) cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # normarlize from sklearn.utils.multiclass import unique_labels classes = unique_labels(y_true, y_pred) fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() pass def Imp_of_fea(name,weight): import operator import collections dic = dict(zip(name,weight)) key_drop = [k for k in dic.keys() if k == 'icustay_age_group'] for k in key_drop: del dic[k] dic = sorted(dic.items(), key=operator.itemgetter(1)) sorted_dict = collections.OrderedDict(dic) plt.style.use('seaborn-darkgrid') plt.figure(figsize=(5,20)) plt.barh(list(sorted_dict.keys()),list(sorted_dict.values())) plt.title('Importance of features in 30 day mortality') def plot_ROC(fpr, tpr, auc, title = 'ROC curve'): sns.set_style('whitegrid') plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr, tpr, label=' (AUC = {:.3f})'.format(auc)) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title(title) plt.legend(loc='best') plt.show() pass def plot_PRC(precision, recall, ap, title= 'Precision-Recall Curve '): sns.set_style('whitegrid') plt.figure # plot(result5_1[0], result5_1[1],linestyle = "-", color = 'C0')#,label = 'AUC = %0.2f' % result5_1[3]) plt.plot(recall, precision, lw=2, label='AP = %0.4f' % ap ) plt.xlabel('Recall') plt.ylabel('Precision') plt.title(title) plt.legend(loc="best") # plt.show() return def get_weights(y): zeros = y[y==0].shape[0] ones = y[y==1].shape[0] weight = np.array([zeros, ones])*1.0/y.shape[0] y = np.where(y==0,weight[1],y) y = np.where(y==1,weight[0],y) return y ``` ## Load Data ``` %run LoadData30D.ipynb X_train.head() y_train.head() ``` # Naive Bayes ### Apply SearchGrid Cross Validation ``` param_grid = { 'var_smoothing' : list(np.logspace(1e-1,1e-15,100)) } model = GridSearchCV( estimator = GaussianNB(), param_grid = param_grid, n_jobs = -1, verbose = 3, scoring = 'accuracy', cv = 5 ).fit(X_train,y_train,get_weights(y_train.values)) best_model = model.best_estimator_ ypred = best_model.predict_proba(X_test) np.savetxt("plot 
metric/y_score_30_GNB.csv", ypred[:,1], delimiter=",") ``` ### Accuracy Score ``` best_model.score(X_test,y_test) ``` ### Importance of Features ``` name = X_train.columns weight = list(abs(best_model.theta_.T[:,1])) Imp_of_fea(name,weight) ``` ### Confusion Matrix ``` ypred_label = probability_to_label(ypred[:,1], threshold=0.5) plot_cm(y_test, ypred_label) ``` ### ROC Curve and AUC ``` fpr, tpr, thresholds = roc_curve(y_test, ypred[:,1]) auc = auc(fpr,tpr) plot_ROC(fpr, tpr, auc, title = 'ROC curve') ``` ### PRC Curve ``` avg_prec = average_precision_score(y_test, ypred_label) prec, recall,_ = precision_recall_curve(y_test,ypred[:,1]) plot_PRC(prec, recall, avg_prec, title= 'Precision-Recall Curve ') ``` ### F1 Score ``` f1_score(y_test,ypred_label) ```
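`get_weights` above builds per-sample weights that are inversely related to each class's frequency so that the minority class is not ignored during fitting. Below is a sketch of the same idea with scikit-learn's built-in helper; the resulting weights should differ from `get_weights` only by a constant factor, which does not change the fitted Gaussian Naive Bayes model.

```
from sklearn.naive_bayes import GaussianNB
from sklearn.utils.class_weight import compute_sample_weight

# 'balanced' weights each sample by n_samples / (n_classes * count_of_its_class).
sw = compute_sample_weight(class_weight="balanced", y=y_train)

nb = GaussianNB().fit(X_train, y_train, sample_weight=sw)
nb.score(X_test, y_test)
```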
github_jupyter
import pandas as pd import numpy as np import matplotlib.pyplot as plt from datetime import datetime import time from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import GridSearchCV from sklearn.metrics import confusion_matrix, average_precision_score, f1_score, roc_curve, auc from sklearn.metrics import precision_recall_curve import seaborn as sns; sns.set() sns.set_style('whitegrid') sns.set_palette('Set2') %matplotlib inline def probability_to_label(probabilities, threshold=0.5): probabilities = list(probabilities) th = threshold predictions = [1 if i > th else 0 for i in probabilities] return predictions def plot_cm(y_true, y_pred, title='Confusion Matrix', cmap=plt.cm.Blues): sns.set_style('white') cm = confusion_matrix(y_test, y_pred) cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # normarlize from sklearn.utils.multiclass import unique_labels classes = unique_labels(y_true, y_pred) fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() pass def Imp_of_fea(name,weight): import operator import collections dic = dict(zip(name,weight)) key_drop = [k for k in dic.keys() if k == 'icustay_age_group'] for k in key_drop: del dic[k] dic = sorted(dic.items(), key=operator.itemgetter(1)) sorted_dict = collections.OrderedDict(dic) plt.style.use('seaborn-darkgrid') plt.figure(figsize=(5,20)) plt.barh(list(sorted_dict.keys()),list(sorted_dict.values())) plt.title('Importance of features in 30 day mortality') def plot_ROC(fpr, tpr, auc, title = 'ROC curve'): sns.set_style('whitegrid') plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr, tpr, label=' (AUC = {:.3f})'.format(auc)) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title(title) plt.legend(loc='best') plt.show() pass def plot_PRC(precision, recall, ap, title= 'Precision-Recall Curve '): sns.set_style('whitegrid') plt.figure # plot(result5_1[0], result5_1[1],linestyle = "-", color = 'C0')#,label = 'AUC = %0.2f' % result5_1[3]) plt.plot(recall, precision, lw=2, label='AP = %0.4f' % ap ) plt.xlabel('Recall') plt.ylabel('Precision') plt.title(title) plt.legend(loc="best") # plt.show() return def get_weights(y): zeros = y[y==0].shape[0] ones = y[y==1].shape[0] weight = np.array([zeros, ones])*1.0/y.shape[0] y = np.where(y==0,weight[1],y) y = np.where(y==1,weight[0],y) return y %run LoadData30D.ipynb X_train.head() y_train.head() param_grid = { 'var_smoothing' : list(np.logspace(1e-1,1e-15,100)) } model = GridSearchCV( estimator = GaussianNB(), param_grid = param_grid, n_jobs = -1, verbose = 3, scoring = 'accuracy', cv = 5 ).fit(X_train,y_train,get_weights(y_train.values)) best_model = model.best_estimator_ ypred = best_model.predict_proba(X_test) np.savetxt("plot metric/y_score_30_GNB.csv", ypred[:,1], delimiter=",") best_model.score(X_test,y_test) name = X_train.columns weight = 
list(abs(best_model.theta_.T[:,1])) Imp_of_fea(name,weight) ypred_label = probability_to_label(ypred[:,1], threshold=0.5) plot_cm(y_test, ypred_label) fpr, tpr, thresholds = roc_curve(y_test, ypred[:,1]) auc = auc(fpr,tpr) plot_ROC(fpr, tpr, auc, title = 'ROC curve') avg_prec = average_precision_score(y_test, ypred_label) prec, recall,_ = precision_recall_curve(y_test,ypred[:,1]) plot_PRC(prec, recall, avg_prec, title= 'Precision-Recall Curve ') f1_score(y_test,ypred_label)
0.799364
0.774839
<a href="https://colab.research.google.com/github/datarobot-community/mlops-examples/blob/master/MLOps%20Agent/Main_Script.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## MLOps Agent - Python End to End **Author**: Matthew Cohen #### Scope The scope of this Notebook is to provide instructions on how to use DataRobot's MLOps Agents. #### Requirements - Python 3.7.0 - MLOps Agent 6.3.3 Your version might be different but the below procedure should remain the same. ``` #Clone the repository !git clone https://github.com/datarobot-community/mlops-examples #Install needed packages !pip install -r /content/mlops-examples/'MLOps Agent'/requirements.txt ``` ### Configuring the Agent To configure the agent, we just need to define the DataRobot MLOps location and our API token. By default, the agent expects the data to be spooled on the local file system. Make sure that default location (/tmp/ta) exists. The `token` needs to be your personal token found under Developer Tools in your DataRobot instance. The endpoint specified below is the DataRobot trial endpoint but you should change it if needed. ``` import datarobot as dr import os token = "YOUR_API_TOKEN" endpoint = "https://app2.datarobot.com" ## connect to DataRobot platform with python client. client = dr.Client(token, "{}/api/v2".format(endpoint)) mlops_agents_tb = client.get("mlopsInstaller") with open("/content/mlops-examples/MLOps Agent/mlops-agent.tar.gz", "wb") as f: f.write(mlops_agents_tb.content) ``` #### Once it is downloaded and saved to your local filesystem, extract the archive ``` !tar -xf /content/mlops-examples/'MLOps Agent'/mlops-agent.tar.gz #Save the folder where the whl file is saved with os.popen("ls /content") as pipe: for line in pipe: if line.startswith('datarobot_mlops_package'): mlops_package = line.strip() version = line.strip()[-5:] print(mlops_package) print(version) #Execute command and install mlops-agent os.system('pip install /content/{}/lib/datarobot_mlops-{}-py2.py3-none-any.whl'.format(mlops_package, version)) ``` ### Open Quick Start To get started with the agent software configuration steps (as noted in the comments from the Deployment Integrations tab above), open: .../{agent install dir}/docs/html/quickstart.html Edit .../{agent install dir}/conf/mlops.agent.conf.yaml to have this (everything else can stay as default if you want). This file contains the properties used by the MLOps service: namely, the DataRobot host URL, your authentication token, and the spool channel used to queue data sent to MLOps. ``` """ # Set your DR host: mlopsURL: "https://app2.datarobot.com" # Set your API token apiToken: "NWQ1NDA3ZTdmNTU1Y2Q......" # Create the spool directory on your system that you want MLOps to use, eg /tmp/ta channelConfigs: - type: "FS_SPOOL" details: {name: "bench", spoolDirectoryPath: "/tmp/ta"} """ !mkdir -p /tmp/ta ``` #### Commands to get you started This will allow you to start, get status, and stop the MLOps agent service. You will only need to run start for now. Run status if you want to check on the service.
``` !bash /content/datarobot_mlops_package-6.3.3/bin/start-agent.sh #Change version based on the downloaded file # Shutdown - DON'T RUN THIS CELL, IT'S JUST SHOWING YOU HOW TO SHUTDOWN #!bash datarobot_mlops_package-6.3.3/bin/stop-agent.sh ``` ## Create an MLOps Model Package for a model and deploy it #### Train a simple RandomForestClassifier model to use for this example ``` import pandas as pd import numpy as np import time import csv import pytz import json import yaml import datetime from sklearn.ensemble import RandomForestClassifier TRAINING_DATA = '/content/{}/examples/data/surgical-dataset.csv'.format(mlops_package) df = pd.read_csv(TRAINING_DATA) columns = list(df.columns) arr = df.to_numpy() np.random.shuffle(arr) split_ratio = 0.8 prediction_threshold = 0.5 train_data_len = int(arr.shape[0] * split_ratio) train_data = arr[:train_data_len, :-1] label = arr[:train_data_len, -1] test_data = arr[train_data_len:, :-1] test_df = df[train_data_len:] # train the model clf = RandomForestClassifier(n_estimators=10, max_depth=2, random_state=0) clf.fit(train_data, label) ``` ### Create empty deployment in DataRobot MLOps Using the MLOps client, create a new model package to represent the random forest model we just created. This includes uploading the traning data and enabling data drift. ``` from datarobot.mlops.mlops import MLOps from datarobot.mlops.common.enums import OutputType from datarobot.mlops.connected.client import MLOpsClient from datarobot.mlops.common.exception import DRConnectedException from datarobot.mlops.constants import Constants # Read the model configuration info from the example. This is used to create the model package. with open('/content/{}/examples/model_config/surgical_binary_classification.json'.format(mlops_package), "r") as f: model_info = json.loads(f.read()) model_info # Read the mlops connection info from the provided example with open('/content/{}/conf/mlops.agent.conf.yaml'.format(mlops_package)) as file: # The FullLoader parameter handles the conversion from YAML # scalar values to Python the dictionary format agent_yaml_dict = yaml.load(file, Loader=yaml.FullLoader) MLOPS_URL = agent_yaml_dict['mlopsUrl'] API_TOKEN = agent_yaml_dict['apiToken'] # Create connected client mlops_connected_client = MLOpsClient(MLOPS_URL, API_TOKEN) # Add training_data to model configuration print("Uploading training data - {}. This may take some time...".format(TRAINING_DATA)) dataset_id = mlops_connected_client.upload_dataset(TRAINING_DATA) print("Training dataset uploaded. 
Catalog ID {}.".format(dataset_id)) model_info["datasets"] = {"trainingDataCatalogId": dataset_id} # Create the model package print('Create model package') model_pkg_id = mlops_connected_client.create_model_package(model_info) model_pkg = mlops_connected_client.get_model_package(model_pkg_id) model_id = model_pkg["modelId"] # Deploy the model package print('Deploy model package') # Give the deployment a name: DEPLOYMENT_NAME="Python binary classification remote model " + str(datetime.datetime.now()) deployment_id = mlops_connected_client.deploy_model_package(model_pkg["id"], DEPLOYMENT_NAME) # Enable data drift tracking print('Enable feature drift') enable_feature_drift = TRAINING_DATA is not None mlops_connected_client.update_deployment_settings(deployment_id, target_drift=True, feature_drift=enable_feature_drift) _ = mlops_connected_client.get_deployment_settings(deployment_id) print("\nDone.") print("DEPLOYMENT_ID=%s, MODEL_ID=%s" % (deployment_id, model_id)) DEPLOYMENT_ID = deployment_id MODEL_ID = model_id ``` ## Run Model Predictions #### Call the external model's predict fuction and send prediction data to MLOps You can find Deployment and Model ID under `Deployments` --> `Monitoring` Tab. The rest of the code can stay as it is. ``` import sys import time import random import pandas as pd from datarobot.mlops.mlops import MLOps from datarobot.mlops.common.enums import OutputType DEPLOYMENT_ID = 'YOUR_DEPLOYMENT_ID' MODEL_ID = 'YOUR_MODEL_ID' CLASS_NAMES = ["1", "0"] # Spool directory path must match the Monitoring Agent path configured by admin. SPOOL_DIR = "/tmp/ta" """ This sample code demonstrates usage of the MLOps library. It does not have real data (or even a real model) and should not be run against a real MLOps service. """ ACTUALS_OUTPUT_FILE = 'actuals.csv' def main(deployment_id, model_id, spool_dir, class_names): """ This is a binary classification algorithm example. User can call the DataRobot MLOps library functions to report statistics. """ # MLOPS: initialize the MLOps instance mlops = MLOps() \ .set_deployment_id(deployment_id) \ .set_model_id(model_id) \ .set_filesystem_spooler(spool_dir) \ .init() # Get predictions start_time = time.time() predictions = clf.predict_proba(test_data).tolist() num_predictions = len(predictions) end_time = time.time() # Get assocation id's for the predictions so we can track them with the actuals def _generate_unique_association_ids(num_samples): ts = time.time() return ["x_{}_{}".format(ts, i) for i in range(num_samples)] association_ids = _generate_unique_association_ids(len(test_data)) # MLOPS: report the number of predictions in the request and the execution time. mlops.report_deployment_stats(num_predictions, end_time - start_time) # MLOPS: report the predictions data: features, predictions, class_names mlops.report_predictions_data(features_df=test_df, predictions=predictions, class_names=class_names, association_ids=association_ids) target_column_name = columns[len(columns) - 1] target_values = [] orig_labels = test_df[target_column_name].tolist() print("Wrote actuals file: %s" % ACTUALS_OUTPUT_FILE) def write_actuals_file(out_filename, test_data_labels, association_ids): """ Generate a CSV file with the association ids and labels, this example uses a dataset that has labels already. In a real use case actuals (labels) will show after prediction is done. 
:param out_filename: name of csv file :param test_data_labels: actual values (labels) :param association_ids: association id list used for predictions """ with open(out_filename, mode="w") as actuals_csv_file: writer = csv.writer(actuals_csv_file, delimiter=",") writer.writerow( [ Constants.ACTUALS_ASSOCIATION_ID_KEY, Constants.ACTUALS_VALUE_KEY, Constants.ACTUALS_TIMESTAMP_KEY ] ) tz = pytz.timezone("America/Los_Angeles") for (association_id, label) in zip(association_ids, test_data_labels): actual_timestamp = datetime.datetime.now().replace(tzinfo=tz).isoformat() writer.writerow([association_id, "1" if label else "0", actual_timestamp]) # Write csv file with labels and association Id, when output file is provided write_actuals_file(ACTUALS_OUTPUT_FILE, orig_labels, association_ids) # MLOPS: release MLOps resources when finished. mlops.shutdown() main(DEPLOYMENT_ID, MODEL_ID, SPOOL_DIR, CLASS_NAMES) ``` ### Upload actuals back to MLOps ``` def _get_correct_actual_value(deployment_type, value): if deployment_type == "Regression": return float(value) return str(value) def _get_correct_flag_value(value_str): if value_str == "True": return True return False def upload_actuals(): print("Connect MLOps client") mlops_connected_client = MLOpsClient(MLOPS_URL, API_TOKEN) deployment_type = mlops_connected_client.get_deployment_type(DEPLOYMENT_ID) actuals = [] with open(ACTUALS_OUTPUT_FILE, mode="r") as actuals_csv_file: reader = csv.DictReader(actuals_csv_file) for row in reader: actual = {} for key, value in row.items(): if key == Constants.ACTUALS_WAS_ACTED_ON_KEY: value = _get_correct_flag_value(value) if key == Constants.ACTUALS_VALUE_KEY: value = _get_correct_actual_value(deployment_type, value) actual[key] = value actuals.append(actual) if len(actuals) == 10000: mlops_connected_client.submit_actuals(DEPLOYMENT_ID, actuals) actuals = [] # Submit the actuals print("Submit actuals") mlops_connected_client.submit_actuals(DEPLOYMENT_ID, actuals) print("Done.") upload_actuals() ``` ### Stop the mlops service ``` !bash /content/datarobot_mlops_package-6.3.3/bin/stop-agent.sh #Change version based on the downloaded file ```
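A quick way to check whether the agent has drained its queue (useful right before stopping it) is to look at what is left in the filesystem spool. A minimal stdlib sketch, assuming the default `/tmp/ta` spool used throughout this notebook; the agent may keep small bookkeeping files there even when idle, so treat this as a rough indicator rather than a guarantee.

```
import os

SPOOL_DIR = "/tmp/ta"

# List whatever is still sitting in the spool; a near-empty directory suggests
# the agent has already forwarded the queued records to DataRobot MLOps.
files = sorted(os.listdir(SPOOL_DIR)) if os.path.isdir(SPOOL_DIR) else []
total_bytes = sum(os.path.getsize(os.path.join(SPOOL_DIR, f)) for f in files)

print("Files in spool: {}".format(len(files)))
print("Total size    : {} bytes".format(total_bytes))
for f in files:
    print(" -", f)
```

The next notebook below is unrelated to the agent and starts directly with its imports.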
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.ticker as ticker
import seaborn as sns

from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score, make_scorer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.inspection import permutation_importance
import multiprocessing
from mlxtend.classifier import EnsembleVoteClassifier
from sklearn.ensemble import BaggingClassifier
from xgboost import XGBClassifier

labels = pd.read_csv('../../csv/train_labels.csv')
labels.head()

values = pd.read_csv('../../csv/train_values.csv')
values.T

to_be_categorized = ["land_surface_condition", "foundation_type", "roof_type",\
                     "position", "ground_floor_type", "other_floor_type",\
                     "plan_configuration", "legal_ownership_status"]
for row in to_be_categorized:
    values[row] = values[row].astype("category")
values.info()

# Downcast the integer columns to the smallest dtype that holds their range
datatypes = dict(values.dtypes)
for row in values.columns:
    if datatypes[row] != "int64" and datatypes[row] != "int32" and \
       datatypes[row] != "int16" and datatypes[row] != "int8":
        continue
    if values[row].nlargest(1).item() > 32767 and values[row].nlargest(1).item() < 2**31:
        values[row] = values[row].astype(np.int32)
    elif values[row].nlargest(1).item() > 127:
        values[row] = values[row].astype(np.int16)
    else:
        values[row] = values[row].astype(np.int8)

labels["building_id"] = labels["building_id"].astype(np.int32)
labels["damage_grade"] = labels["damage_grade"].astype(np.int8)
labels.info()

values['age_is_leq_than_100'] = (values['age'] <= 100).astype(np.int8)
# values['age_is_betw_100_and_200'] = ((values['age'] > 100) & (values['age'] <= 200)).astype(np.int8)
values['age_is_greater_than_200'] = (values['age'] > 200).astype(np.int8)
values[values['age'] >= 100]

important_values = values\
                    .merge(labels, on="building_id")
important_values.drop(columns=["building_id"], inplace = True)
important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category")
important_values

# Work on a copy holding just the geo levels and the target
df = important_values[['geo_level_1_id', 'geo_level_2_id', 'geo_level_3_id', 'damage_grade']].copy()

# Summarise the target per geo_level_2_id (the original cell was left unfinished;
# the mean damage grade per region is used here as an illustrative aggregation)
df = df.groupby('geo_level_2_id').agg({'damage_grade': 'mean'})

df
```
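The imports at the top of this notebook already bring in `RandomForestClassifier`, `train_test_split` and `f1_score`, so a quick baseline fit is a natural next step once `important_values` is prepared. This is only a sketch, not part of the original notebook: it one-hot encodes the categorical columns with `pd.get_dummies`, holds out 20% of the rows, and reports micro-averaged F1 as an illustrative metric; adjust the feature handling and scoring to whatever the project actually requires.

```
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import pandas as pd

# Reuses `important_values` built above: damage_grade is the target, everything else is a feature
X = pd.get_dummies(important_values.drop(columns=["damage_grade"]))
y = important_values["damage_grade"]

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)

baseline = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42)
baseline.fit(X_train, y_train)

print("micro F1:", f1_score(y_test, baseline.predict(X_test), average="micro"))
```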
This is based from a tutorial video by Keith Galli: https://www.youtube.com/watch?v=M9Itm95JzL0 Keith has taken the data from (http://jmcauley.ucsd.edu/data/amazon/) and cleaned the review only for books and the year 2014. He then took 1000 random samples from those subset Keith's data are provided in his Github: https://github.com/keithgalli/sklearn # Load data ``` import json import random # create basic class to be neat class Sentiment: NEGATIVE = "NEGATIVE" NEUTRAL = "NEUTRAL" POSITIVE = "POSITIVE" class Review: def __init__(self, text, score): self.text = text self.score = score self.sentiment = self.get_sentiment() def get_sentiment(self): # assume score 1 and 2 are negative, 3 is neutral, 4 and 5 are positive (amazon review out of 5) if self.score <= 2: return Sentiment.NEGATIVE elif self.score == 3: return Sentiment.NEUTRAL else: return Sentiment.POSITIVE class ReviewContainer: def __init__(self, reviews): self.reviews = reviews def get_text(self): return [x.text for x in self.reviews] def get_sentiment(self): return [x.sentiment for x in self.reviews] def evenly_distribute(self): negative = list(filter(lambda x: x.sentiment == Sentiment.NEGATIVE, self.reviews)) positive = list(filter(lambda x: x.sentiment == Sentiment.POSITIVE, self.reviews)) positive_shrunk = positive[:len(negative)] self.reviews = negative + positive_shrunk random.shuffle(self.reviews) # read the file file_name = "C:/Users/Riyan Aditya/Desktop/ML_learning/project8_explore/Books_small_10000.json" reviews = [] with open(file_name) as f: for line in f: review = json.loads(line) reviews.append(Review(review['reviewText'],review['overall'])) reviews[5].text ``` # Prep data ``` len(reviews) from sklearn.model_selection import train_test_split training, test = train_test_split(reviews,test_size = 0.33, random_state=42) train_container = ReviewContainer(training) test_container = ReviewContainer(test) train_container.evenly_distribute() train_x = train_container.get_text() train_y = train_container.get_sentiment() test_container.evenly_distribute() test_x = test_container.get_text() test_y = test_container.get_sentiment() print(train_y.count(Sentiment.POSITIVE)) print(train_y.count(Sentiment.NEGATIVE)) ``` ## Bag of words vectorisation ``` from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer # apply bag of words vectorisation vectorizer = TfidfVectorizer() train_x_vectors = vectorizer.fit_transform(train_x) test_x_vectors = vectorizer.transform(test_x) print(train_x[0]) print(train_x_vectors[0]) ``` # classification ## Linear SVM ``` from sklearn import svm clf_svm = svm.SVC(kernel='linear') clf_svm.fit(train_x_vectors, train_y) test_x[0] test_x_vectors[0] clf_svm.predict(test_x_vectors[0]) ``` ## Decision Tree ``` from sklearn.tree import DecisionTreeClassifier clf_dec = DecisionTreeClassifier() clf_dec.fit(train_x_vectors, train_y) clf_dec.predict(test_x_vectors[0]) ``` ## Naive Bayes ``` from sklearn.naive_bayes import GaussianNB clf_gnb = GaussianNB() clf_gnb.fit(train_x_vectors.toarray(), train_y) clf_gnb.predict(test_x_vectors[0].toarray()) ``` ## Logistic Regression ``` from sklearn.linear_model import LogisticRegression clf_log = LogisticRegression() clf_log.fit(train_x_vectors.toarray(), train_y) clf_log.predict(test_x_vectors[0].toarray()) ``` # Evaluation ## mean accuracy ``` clf_svm.score(test_x_vectors, test_y) clf_dec.score(test_x_vectors, test_y) clf_gnb.score(test_x_vectors.toarray(), test_y) clf_log.score(test_x_vectors, test_y) ``` ## f1 score ``` from sklearn.metrics import 
f1_score

f1_score(test_y, clf_svm.predict(test_x_vectors), average = None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE])
```

This means the model is good at positive reviews but bad at neutral and negative ones

```
f1_score(test_y, clf_dec.predict(test_x_vectors), average = None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE])

f1_score(test_y, clf_gnb.predict(test_x_vectors.toarray()), average = None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE])

f1_score(test_y, clf_log.predict(test_x_vectors), average = None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE])
```

All the models are pretty much only good at positive reviews

## Let's investigate

```
test_y.count(Sentiment.POSITIVE)
```

This suggests our models are biased towards the positive label

```
test_y.count(Sentiment.NEGATIVE)
```

Let's use a bigger dataset: 10,000 random samples. Reload the data with the 10,000-sample file.

The models are slightly better after we use the bigger training set and balance it 50-50 between positive and negative.

Then we notice that the test set is not evenly distributed, so let's balance it in the same way.

That's better now that the test set is 50-50 too.

```
test_set = ['very brilliant', "bad book do not buy", "horrible waste of time"]
new_test = vectorizer.transform(test_set)

clf_svm.predict(new_test)
```

The model probably did not learn what 'brilliant' means

## Use GridSearchCV

```
from sklearn.model_selection import GridSearchCV

parameters = {'kernel':('linear','rbf'), 'C': (1,4,8,16,32)}

svc = svm.SVC()
clf = GridSearchCV(svc, parameters, cv = 5)
clf.fit(train_x_vectors, train_y)

clf.best_params_
```

The best parameters are essentially the ones we started with anyway

# Saving the model

```
import pickle

with open('sentiment_classifier.pkl','wb') as f:
    pickle.dump(clf, f)

# to load
with open('sentiment_classifier.pkl','rb') as f:
    loaded_clf = pickle.load(f)

loaded_clf.predict(test_x_vectors[0])
```
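One caveat with the pickle above is that it only stores the classifier: anyone loading `sentiment_classifier.pkl` also needs the fitted `vectorizer` to turn raw text into the same feature space. A common way around this (not part of the original tutorial, just a sketch) is to bundle both steps in a scikit-learn `Pipeline` and pickle that instead; the SVC parameters below are placeholders, so plug in `clf.best_params_` from the grid search above if you prefer.

```
import pickle
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm

# Reuses train_x / train_y (raw review text and labels) from the cells above
text_clf = Pipeline([
    ("tfidf", TfidfVectorizer()),
    ("svc", svm.SVC(kernel="linear", C=1)),
])
text_clf.fit(train_x, train_y)

with open("sentiment_pipeline.pkl", "wb") as f:
    pickle.dump(text_clf, f)

# The loaded pipeline accepts raw strings directly
with open("sentiment_pipeline.pkl", "rb") as f:
    loaded_pipeline = pickle.load(f)

print(loaded_pipeline.predict(["very brilliant", "horrible waste of time"]))
```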
# Project Setup * This section describes how to create and configure a project * This is the same as creating a new project in the editor and going through all of the steps. * When a user creates a project with client.create_project() the project is not ready for labeling. * An ontology must be set * datasets must be attached ``` !pip install labelbox from labelbox import Client, Project, LabelingFrontend from labelbox.schema.ontology import Tool, OntologyBuilder from getpass import getpass import os # If you don't want to give google access to drive you can skip this cell # and manually set `API_KEY` below. COLAB = "google.colab" in str(get_ipython()) if COLAB: !pip install colab-env -qU from colab_env import envvar_handler envvar_handler.envload() API_KEY = os.environ.get("LABELBOX_API_KEY") if not os.environ.get("LABELBOX_API_KEY"): API_KEY = getpass("Please enter your labelbox api key") if COLAB: envvar_handler.add_env("LABELBOX_API_KEY", API_KEY) # Set this to a project that is already set up PROJECT_ID = "ckm4xyfncfgja0760vpfdxoro" # Only update this if you have an on-prem deployment ENDPOINT = "https://api.labelbox.com/graphql" client = Client(api_key=API_KEY, endpoint=ENDPOINT) ``` ### Identify project, dataset, and ontology * Pick the project to setup * Dataset(s) to attach to that project * Configure the ontology for the project ``` # Use bounding boxes to label cats ontology_builder = OntologyBuilder( tools=[Tool(name="cat", tool=Tool.Type.BBOX)]) project = client.create_project(name="my_new_project") dataset = client.create_dataset(name="my_new_dataset") # Add data_rows since this is a new dataset (see basics/data_rows.ipynb for more information on this) test_img_url = "https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg" dataset.create_data_row(row_data=test_img_url) # Unless you are using a custom editor you should always use the following editor: editor = next( client.get_labeling_frontends(where=LabelingFrontend.name == "Editor")) # Note that you can use any dataset or ontology even if they already exist. existing_project = client.get_project(PROJECT_ID) # We are not using this, but it is possible to copy the ontology to the new project ontology = existing_project.ontology() ``` ### Setup and attach dataset * Setting up a project will add an ontology and will enable labeling to begin * Attaching dataset(s) will add all data_rows belonging to the dataset to the queue. ``` project.setup(editor, ontology_builder.asdict()) # Could also do if ontology is a normalized ontology # project.setup(editor, ontology.normalized) # Run this for each dataset we want to attach project.datasets.connect(dataset) # project.datasets.connect(another_dataset) ``` ### Review ``` # Note setup_complete will be None if it fails. print(project.setup_complete) print(project.ontology) print([ds.name for ds in project.datasets()]) print(f"https://app.labelbox.com/projects/{project.uid}") ```
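If you want to add more assets to the dataset created above, the same `create_data_row` call can simply be looped over a list of URLs. This is just a sketch: the URL list below is a placeholder (its first entry is the sample image already used in this notebook), and for very large uploads a bulk helper may be preferable, but the loop sticks to the exact call already shown.

```
# Placeholder list of publicly readable asset URLs (the first is the sample image used above)
more_image_urls = [
    "https://raw.githubusercontent.com/Labelbox/labelbox-python/develop/examples/assets/2560px-Kitano_Street_Kobe01s5s4110.jpg",
    # "https://example.com/another_image.jpg",  # hypothetical additional asset
]

# Reuses `dataset` from the setup above; each call queues one data row for labeling
for url in more_image_urls:
    dataset.create_data_row(row_data=url)

print("Queued {} additional data rows".format(len(more_image_urls)))
```

The next notebook below is a separate web-scraping example and starts directly with its imports.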
``` import time import re from selenium import webdriver from selenium.webdriver.common.keys import Keys from bs4 import BeautifulSoup import pandas as pd def extract_vehicle_data(start = 1, end = 50, base_url = 'https://www.milanuncios.com/coches-de-segunda-mano/?pagina='): ''' Connects to Milanuncios and extracts a list of predefined features from each vehicle listed. Parameters: start (int):The first page over which we want to start scrapping. end (int):The last page on which we want to stop scrapping. base_url (str):The base URL to which we want to connect. Returns: vehicle_list(list):List of dictionaries containing information about all the vehicles listed within the execution. ''' try: assert isinstance(start, int), 'The "start" parameter is not an integer.' assert isinstance(end, int), 'The "end" parameter is not an integer.' assert isinstance(base_url, str), 'The "base_url" parameter is not a string.' assert start > 0, 'The "start" parameter has to be greater than 0.' assert start < end, 'The "end" parameter has to be greater than the "start" parameter.' assert re.match('https://www\.milanuncios\.com/.+pagina=$', base_url), 'The "base_url" parameter does not match the expected regex.' except AssertionError as ae: print(ae) # Create an instance of the Chrome web driver browser = webdriver.Chrome() vehicle_list = [] # Iterate over each page until specified in the range function for page in range(start, end + 1): # Form the real URL appending the string value of the current page over which we're iterating browser.get(base_url + str(page)) # Wait a little for the website to load time.sleep(1) # If we're on the first page, dismiss the cookies pop-up if page == start: browser.find_element_by_css_selector('button.sui-AtomButton.sui-AtomButton--primary.sui-AtomButton--solid.sui-AtomButton--center').click() # Calculate the height of the website total_height = int(browser.execute_script("return document.body.scrollHeight")) # Slowly scroll down until you reach the bottom for i in range(1, total_height, 50): browser.execute_script("window.scrollTo(0, {});".format(i)) # Capture the DOM elements of our interest post_elems = browser.find_elements_by_class_name("ma-AdCard-body") # Iterate over each of the elements that we're capturing for post in post_elems: # Sometimes you'll also capture ads. Skip them. if 'OFERTA PATROCINADA' in post.find_element_by_xpath('..').text: print('Skipping ad...') continue # Convert the captured data into BS4 format for simplified extractions html = post.get_attribute('innerHTML') soup = BeautifulSoup(html) # Certain features are not listed in an structured format and we cannot access them directly tags = [x.text for x in soup.find_all('span', {'class': 'ma-AdTag-label'})] # Try to extract the associated features. In certain situations, some will miss. In this situation, we will skip the entire row. # These are all very important for machine learning purpuses and it's rare to find a missing value, so it's preferable to skip it at this point. 
try: location = soup.find('a', {'class': 'ma-AdCard-subtitleLink'}).text.split(' en ')[1] hp = next(x for x in tags if x.endswith('CV')) mileage = next(x for x in tags if x.endswith('kms') or x.endswith('km')) year = next(x for x in tags if x.isdigit()) transmission = next(x for x in tags if x in ['Manual', 'Automático']) doors = next(x for x in tags if x.endswith('puertas')) price = soup.find('span', {'class': 'ma-AdPrice-value'}).text except: print('Found a problem when gathering information for: [', soup.find('h2').text, '] Skipping...') continue # Populate all the features in a dictionary vehicle = { 'title': soup.find('h2').text, 'location': location, 'url': soup.find('a', {'class': 'ma-AdCard-titleLink'}, href = True)['href'], 'desc': soup.find('p', {'class': 'ma-AdCardDescription-text'}).text, 'price': price, 'seller': soup.find('span', {'class': 'ma-AdTag-label'}).text, 'mileage': mileage, 'year': year, 'transmission': transmission, 'doors': doors, 'hp': hp } # Add the dictionary to the list of vehicles vehicle_list.append(vehicle) return vehicle_list def write_csv_output(vehicle_list, output_name): ''' Takes a list of dictionaries containing vehicle information and writes it in csv format. Parameters: vehicle_list (list):List of dictionaries containing vehicle information. output_name (str): The name of the output csv file. Returns: None ''' try: assert isinstance(vehicle_list, list), 'The "vehicle_list" parameter is not a dictionary.' assert isinstance(output_name, str), 'The "output_name" parameter is not a string.' except AssertionError as ae: print(ae) # Define a Pandas DataFrame containing the vehicle information vehicle_list_df = pd.DataFrame(vehicle_list) # Create the output csv file with the previous data vehicle_list_df.to_csv(f'{output_name}.csv') vehicle_list = extract_vehicle_data(end = 200) write_csv_output(vehicle_list, 'vehicle_listing') ```
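The CSV written by `write_csv_output` stores every field as the raw string scraped from the page (mileage such as "120.000 kms", horsepower such as "110 CV", and whatever currency formatting the site uses for the price), so a little post-processing is usually needed before analysis. A hedged sketch of such a cleanup with pandas, using the column names produced by the scraper above; the digit-stripping regex is a starting point and may need adjusting if the listing formats change.

```
import pandas as pd

df = pd.read_csv("vehicle_listing.csv", index_col=0)

# Keep only the digits in the numeric-looking columns and cast them
def to_number(series):
    return pd.to_numeric(
        series.astype(str).str.replace(r"[^\d]", "", regex=True),
        errors="coerce")

df["price_eur"] = to_number(df["price"])
df["mileage_km"] = to_number(df["mileage"])
df["hp_cv"] = to_number(df["hp"])
df["doors_n"] = to_number(df["doors"])
df["year"] = to_number(df["year"])

print(df[["price_eur", "mileage_km", "hp_cv", "year"]].describe())
```

Rows where a field could not be parsed end up as NaN, which is easier to filter than a stray string.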
``` !pip install --upgrade tables !pip install eli5 !pip install xgboost import pandas as pd import numpy as np from sklearn.dummy import DummyRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor import xgboost as xgb from sklearn.metrics import mean_absolute_error as mae from sklearn.model_selection import cross_val_score, KFold import eli5 from eli5.sklearn import PermutationImportance cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car" df = pd.read_hdf('data/car.h5') df.shape SUFFIX_CAT = '__cat' for feat in df.columns: if isinstance(df[feat][0],list): continue factorized_values = df[feat].factorize()[0] if SUFFIX_CAT in feat: df[feat] = factorized_values else: df[feat + SUFFIX_CAT]= factorized_values cat_feats = [x for x in df.columns if SUFFIX_CAT in x ] cat_feats = [x for x in cat_feats if 'price' not in x ] len(cat_feats) def run_model (model, feats): X = df[feats].values y = df['price_value'].values scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error') return np.mean(scores), np.std(scores) ``` ##DecisionTree ``` run_model (DecisionTreeRegressor(max_depth=5), cat_feats) ``` ##RandomForest ``` run_model (RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0), cat_feats) ``` ##XGBoost ``` run_model (xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0), cat_feats) m = xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0) m.fit(X,y) imp = PermutationImportance(m, random_state=0).fit(X,y) eli5.show_weights(imp, feature_names = cat_feats) feats = ['param_napęd__cat', 'param_rok-produkcji__cat', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] len(feats) len(cat_feats) feats = ['param_napęd__cat', 'param_rok-produkcji__cat', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model (xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0), feats) df['param_napęd'].unique() df['param_rok-produkcji'].unique() df['param_rok-produkcji__cat'].unique() df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc__cat', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 
'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model (xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0), feats) df['param_moc'].unique() df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0] ) ) df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0] ) ) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa__cat', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model (xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0), feats) df['param_pojemność-skokowa'].unique() df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','') ) ) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] run_model (xgb.XGBRegressor(max_depth=5, n_estimators=50, learning_rate=0.1, seed=0), feats) ```
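Since `run_model` makes it cheap to compare configurations, a small sanity sweep over tree depth is a natural follow-up once the numeric features are in place. This is only an illustrative sketch that reuses `run_model` and the final `feats` list defined above; the depth values are arbitrary, not a recommended grid.

```
import xgboost as xgb

# Compare a few tree depths with the same cross-validation setup as above
for depth in [3, 5, 7]:
    score, std = run_model(
        xgb.XGBRegressor(max_depth=depth, n_estimators=50, learning_rate=0.1, seed=0),
        feats,
    )
    # Scores are neg_mean_absolute_error, so values closer to zero are better
    print("max_depth={}: mean={:.0f}, std={:.0f}".format(depth, score, std))
```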
# Spectroscopy of a 3 and 6-cavity system 1. **Introduction** 2. **Problem parameters** 3. **Setting up the operators and the Hamiltonian's** 4. **Computing and plotting the eigenfrequencies of the coupled system** 5. **Following the same procedure for a 6 cavity and 2-qubit system** * Setting up the operators, Hamiltonian, and the initial state $\psi_0$ * Computing the energy splitting and plotting the result **Author** : Soumya Shreeram (shreeramsoumya@gmail.com)<br> **Supervisor** : Yu-Chin Chao (ychao@fnal.gov) <br> **Date**: July 2019<br> This script was coded as part of the Helen Edwards Summer Internship program at Fermilab. ## 1. Introduction The hamiltonian for such a system with two qubits with frequencies $v_{Q,1}$, $v_{Q,2}$, and $n$ mode filter can be described as, $$ \hat{H}_{F} = \sum_{i=1}^{n}h\ v_{F}\ \hat{a}^{\dagger}_i \hat{a}_i + \sum_{i=2}^{n}h\ g_{F}\ (\hat{a}^{\dagger}_i \hat{a}_{i-1} + \hat{a}^{\dagger}_{i-1} \hat{a}_i)$$ where $\hat{a}_i$ creates a photon in the $i^{th}$ resonantor, and $g_F$ is the filter-filter coupling. The code calculates the eigen modes for a Hamiltonian containing 3 and 6 cavities. It plots the lowest-state energy splitting observed in the system due to coupling. ``` %matplotlib inline import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 16}) import numpy as np from math import pi from qutip import * ``` ## 2. Problem parameters ``` """------------- FREQUENCIES --------------------""" w_f = 2*pi*7.1 # Resonator/ Filter frequency """------------- COUPLING --------------------""" g_f1 = 2*pi*0.118 # Filter-filter coupling g_f2 = 2*pi*0.344 numF = 3 # number of filters (CASE 1) numF6 = 6 # number of filters (CASE 2) N = 2 # number of fock states kappa = 1.0/0.129 # cavity dissipation rate n_th_a = 0.063 # avg. no. of thermal bath excitation r1 = 0.0075 # qubit relaxation rate r2 = 0.0025 # qubit dephasing rate times = np.linspace(0,100,800) ``` ## 3. 
```
def numOp(m):
    """
    Computes the number operator
    @param m :: lowering (annihilation) operator of the mode
    """
    return m.dag()*m

def rwaCoupling(m1, m2):
    return m1.dag()*m2 + m1*m2.dag()

# cavity 1, 2, 3 destruction operators
a1 = tensor(destroy(N), qeye(N), qeye(N))
a2 = tensor(qeye(N), destroy(N), qeye(N))
a3 = tensor(qeye(N), qeye(N), destroy(N))

# Filter Hamiltonians (refer to the formula in the Introduction)
Hf = numOp(a1) + numOp(a2) + numOp(a3)
H_f12 = g_f1*(rwaCoupling(a1, a2) + rwaCoupling(a2, a3))

H_3 = w_f*Hf + H_f12        # Resultant Hamiltonian

# collapse operators
c_ops = []

def compute(w_qList, H, N, numF):
    """
    Computes the eigenvalues and eigenstates of the coupled system
    @param w_qList :: range of qubit frequencies to tabulate over
    @param H :: Hamiltonian of the coupled cavities
    @param N :: number of Fock states per mode
    @param numF :: number of filters/cavities
    """
    evals_mat = np.zeros((len(w_qList), N**numF))
    for i, w_qi in enumerate(w_qList):
        # H carries no qubit term here, so the same spectrum is stored for every w_qi
        evals, evecs = H.eigenstates()
        evals_mat[i,:] = evals
    return evals_mat

def spiltFreq(evals_mat, n1):
    wf_splittings = []
    for n in n1:
        wf_splittings.append((evals_mat[0, n]-evals_mat[0, 0])/(2*pi))
    print(['Split frequency %2d = %.4f'%(i, wf) for i, wf in enumerate(wf_splittings)])
    return

def setLabels(ax, numF):
    ax.set_xlabel(r'$\nu_{Q1}$ (GHz)')
    ax.set_ylabel('Eigen-frequencies (GHz)')
    ax.set_title('%.1f cavities'%numF)
    return

def plotEnergySplitting(n1, w_q1List, evals_mat, numF):
    fig, ax = plt.subplots(figsize=(8,5))
    for n in n1:
        ax.plot(w_q1List / (2 * pi), (evals_mat[:, n] - evals_mat[:, 0]) / (2 * pi), lw=2)
    setLabels(ax, numF)
    return
```

## 4. Computing and plotting the eigenfrequencies of the coupled system

```
# range of qubit 1 frequencies
w_q1List = np.linspace(5,7.2,100)*2*pi

# computes the eigenvalue matrix for the range of frequencies
evals_mat = compute(w_q1List, H_3, N, numF)

# plots the energy splitting due to coupling
n1 = [1, 2, 3]
plotEnergySplitting(n1, w_q1List, evals_mat, numF)

# shows the split frequencies
spiltFreq(evals_mat, n1)
```

## 5. Following the same procedure for a 6-cavity and 2-qubit system
### 5.1 Setting up the operators, Hamiltonian, and the initial state $\psi_0$

$$ \hat{H}_{F} = \sum_{i=1}^{n}h\ v_{F}\ \hat{a}^{\dagger}_i \hat{a}_i + \sum_{i=2}^{n}h\ g_{F}\ (\hat{a}^{\dagger}_i \hat{a}_{i-1} + \hat{a}^{\dagger}_{i-1} \hat{a}_i)$$

```
def sumHamiltonians(some_list):
    """
    Function sums the number-operator Hamiltonians of the modes in the list
    @param some_list :: list containing the lowering operators to be summed over
    @return H :: Hamiltonian with the required sum
    """
    H = 0
    for some in some_list:
        H += numOp(some)
    return H

def sumCoupling(a_list):
    """
    Function sums the nearest-neighbour couplings in the list under the RWA approx
    @param a_list :: list containing the lowering operators to be summed over
    @return H :: Hamiltonian with the required sum
    """
    H = 0
    for i in range(1, len(a_list)):
        H += rwaCoupling(a_list[i], a_list[i-1])   # accumulate the (i-1, i) couplings
    return H

# operators
a_1 = tensor(destroy(N), qeye(N), qeye(N), qeye(N), qeye(N), qeye(N))
a_2 = tensor(qeye(N), destroy(N), qeye(N), qeye(N), qeye(N), qeye(N))
a_3 = tensor(qeye(N), qeye(N), destroy(N), qeye(N), qeye(N), qeye(N))
a_4 = tensor(qeye(N), qeye(N), qeye(N), destroy(N), qeye(N), qeye(N))
a_5 = tensor(qeye(N), qeye(N), qeye(N), qeye(N), destroy(N), qeye(N))
a_6 = tensor(qeye(N), qeye(N), qeye(N), qeye(N), qeye(N), destroy(N))

a_list = [a_1, a_2, a_3, a_4, a_5, a_6]

# Cavity Hamiltonian and coupling Hamiltonian
H_f = w_f*sumHamiltonians(a_list)
# coupling between every pair of adjacent cavities, as in the nearest-neighbour sum above
H_gf = g_f1*(rwaCoupling(a_1, a_2) + rwaCoupling(a_2, a_3) + rwaCoupling(a_3, a_4)
             + rwaCoupling(a_4, a_5) + rwaCoupling(a_5, a_6))

# resultant Hamiltonian for 6 cavities
H_6 = H_f + H_gf
```

### 5.2 Computing the energy splitting and plotting the result

```
n2 = [1, 2, 3, 4, 5, 6]

# computes the eigenvalue matrix for the range of frequencies
evals_mat_6 = compute(w_q1List, H_6, N, numF6)

# plots the energy splitting due to coupling
plotEnergySplitting(n2, w_q1List, evals_mat_6, numF6)

spiltFreq(evals_mat_6, n2)
```
github_jupyter
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})

import numpy as np
from math import pi
from qutip import *

"""------------- FREQUENCIES --------------------"""
w_f = 2*pi*7.1        # Resonator/filter frequency

"""------------- COUPLING --------------------"""
g_f1 = 2*pi*0.118     # Filter-filter coupling
g_f2 = 2*pi*0.344

numF = 3              # number of filters (CASE 1)
numF6 = 6             # number of filters (CASE 2)
N = 2                 # number of Fock states

kappa = 1.0/0.129     # cavity dissipation rate
n_th_a = 0.063        # avg. no. of thermal bath excitations
r1 = 0.0075           # qubit relaxation rate
r2 = 0.0025           # qubit dephasing rate

times = np.linspace(0,100,800)

def numOp(m):
    """
    Computes the number operator
    @param m :: lowering (annihilation) operator of the mode
    """
    return m.dag()*m

def rwaCoupling(m1, m2):
    return m1.dag()*m2 + m1*m2.dag()

# cavity 1, 2, 3 destruction operators
a1 = tensor(destroy(N), qeye(N), qeye(N))
a2 = tensor(qeye(N), destroy(N), qeye(N))
a3 = tensor(qeye(N), qeye(N), destroy(N))

# Filter Hamiltonians (refer to the formula in the Introduction)
Hf = numOp(a1) + numOp(a2) + numOp(a3)
H_f12 = g_f1*(rwaCoupling(a1, a2) + rwaCoupling(a2, a3))

H_3 = w_f*Hf + H_f12        # Resultant Hamiltonian

# collapse operators
c_ops = []

def compute(w_qList, H, N, numF):
    """
    Computes the eigenvalues and eigenstates of the coupled system
    @param w_qList :: range of qubit frequencies to tabulate over
    @param H :: Hamiltonian of the coupled cavities
    @param N :: number of Fock states per mode
    @param numF :: number of filters/cavities
    """
    evals_mat = np.zeros((len(w_qList), N**numF))
    for i, w_qi in enumerate(w_qList):
        # H carries no qubit term here, so the same spectrum is stored for every w_qi
        evals, evecs = H.eigenstates()
        evals_mat[i,:] = evals
    return evals_mat

def spiltFreq(evals_mat, n1):
    wf_splittings = []
    for n in n1:
        wf_splittings.append((evals_mat[0, n]-evals_mat[0, 0])/(2*pi))
    print(['Split frequency %2d = %.4f'%(i, wf) for i, wf in enumerate(wf_splittings)])
    return

def setLabels(ax, numF):
    ax.set_xlabel(r'$\nu_{Q1}$ (GHz)')
    ax.set_ylabel('Eigen-frequencies (GHz)')
    ax.set_title('%.1f cavities'%numF)
    return

def plotEnergySplitting(n1, w_q1List, evals_mat, numF):
    fig, ax = plt.subplots(figsize=(8,5))
    for n in n1:
        ax.plot(w_q1List / (2 * pi), (evals_mat[:, n] - evals_mat[:, 0]) / (2 * pi), lw=2)
    setLabels(ax, numF)
    return

# range of qubit 1 frequencies
w_q1List = np.linspace(5,7.2,100)*2*pi

# computes the eigenvalue matrix for the range of frequencies
evals_mat = compute(w_q1List, H_3, N, numF)

# plots the energy splitting due to coupling
n1 = [1, 2, 3]
plotEnergySplitting(n1, w_q1List, evals_mat, numF)

# shows the split frequencies
spiltFreq(evals_mat, n1)

def sumHamiltonians(some_list):
    """
    Function sums the number-operator Hamiltonians of the modes in the list
    @param some_list :: list containing the lowering operators to be summed over
    @return H :: Hamiltonian with the required sum
    """
    H = 0
    for some in some_list:
        H += numOp(some)
    return H

def sumCoupling(a_list):
    """
    Function sums the nearest-neighbour couplings in the list under the RWA approx
    @param a_list :: list containing the lowering operators to be summed over
    @return H :: Hamiltonian with the required sum
    """
    H = 0
    for i in range(1, len(a_list)):
        H += rwaCoupling(a_list[i], a_list[i-1])   # accumulate the (i-1, i) couplings
    return H

# operators
a_1 = tensor(destroy(N), qeye(N), qeye(N), qeye(N), qeye(N), qeye(N))
a_2 = tensor(qeye(N), destroy(N), qeye(N), qeye(N), qeye(N), qeye(N))
a_3 = tensor(qeye(N), qeye(N), destroy(N), qeye(N), qeye(N), qeye(N))
a_4 = tensor(qeye(N), qeye(N), qeye(N), destroy(N), qeye(N), qeye(N))
a_5 = tensor(qeye(N), qeye(N), qeye(N), qeye(N), destroy(N), qeye(N))
a_6 = tensor(qeye(N), qeye(N), qeye(N), qeye(N), qeye(N), destroy(N))
a_list = [a_1, a_2, a_3, a_4, a_5, a_6]

# Cavity Hamiltonian and coupling Hamiltonian
H_f = w_f*sumHamiltonians(a_list)
# coupling between every pair of adjacent cavities (nearest-neighbour sum)
H_gf = g_f1*(rwaCoupling(a_1, a_2) + rwaCoupling(a_2, a_3) + rwaCoupling(a_3, a_4)
             + rwaCoupling(a_4, a_5) + rwaCoupling(a_5, a_6))

# resultant Hamiltonian for 6 cavities
H_6 = H_f + H_gf

n2 = [1, 2, 3, 4, 5, 6]

# computes the eigenvalue matrix for the range of frequencies
evals_mat_6 = compute(w_q1List, H_6, N, numF6)

# plots the energy splitting due to coupling
plotEnergySplitting(n2, w_q1List, evals_mat_6, numF6)

spiltFreq(evals_mat_6, n2)
0.782413
0.985043
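A quick cross-check of the chain spectra computed in the notebook above: restricted to the single-excitation subspace (an assumption made here only for the comparison), the nearest-neighbour filter Hamiltonian reduces to an $n\times n$ tridiagonal matrix with analytic eigenfrequencies $w_f + 2g_F\cos\big(k\pi/(n+1)\big)$, $k = 1,\dots,n$. The sketch below uses plain NumPy, with $w_f$ and $g_{f1}$ copied from the record:

```
import numpy as np

# parameters copied from the record above (angular-frequency units)
w_f = 2*np.pi*7.1
g_f = 2*np.pi*0.118

def chain_eigenfrequencies(n, w_f, g):
    # single-excitation block of the nearest-neighbour chain: tridiagonal n x n matrix
    H = np.diag(np.full(n, w_f)) + np.diag(np.full(n - 1, g), 1) + np.diag(np.full(n - 1, g), -1)
    return np.sort(np.linalg.eigvalsh(H))

for n in (3, 6):
    numeric = chain_eigenfrequencies(n, w_f, g_f)
    k = np.arange(1, n + 1)
    analytic = np.sort(w_f + 2*g_f*np.cos(k*np.pi/(n + 1)))
    print(n, 'cavities:', np.allclose(numeric, analytic), numeric/(2*np.pi))
```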
```
### Use this example as a base to solve the complete problem.
from sympy import *
from sympy import init_printing; init_printing(use_latex = 'mathjax')

#var('x l m hbar W c')
x, l, m, hbar, W, c = symbols('x l m hbar W c')

f_1 = x*(l - x)
f_2 = (x**2)*((l - x)**2)
f_3 = x*(l - x)*((l/2)-x)
f_4 = (x**2)*((l - x)**2)*((l/2)-x)

FUNC = [f_1, f_2, f_3, f_4]

n = 4
c = []
for i in range(n):
    c.append(Symbol('c_%d' %(i+1)))

c = Matrix([var('c1')])
c = c.row_insert(1, Matrix([var('c2')]))
c = c.row_insert(2, Matrix([var('c3')]))
c = c.row_insert(3, Matrix([var('c4')]))
c

n = 4
U = ones(n,n)
H = zeros(n,n)
s = zeros(n,n)

for i in range(n):
    for j in range(n):
        integrando = (-hbar**2/(2*m))* FUNC[i] *diff(FUNC[j], x, 2)
        H[i,j] = integrate(integrando, (x, 0, l))
        s[i,j] = integrate(FUNC[i]*FUNC[j], (x,0,l))

mult = H - s*W
mult

determinante = mult.det()
determinante

e = solve(determinante, W)
e = Matrix([e])
e

e = e*((l**2*m)/(hbar**2))
e

#sorted(e_lista)
e = Matrix([sorted(e)])
e

e = e*(hbar**2)/(l**2*m)
e

mult = H - s*e[0]
mult

res1 = H - s*e[0]
res1

res2 = H - s*e[1]
res2

res3 = H - s*e[2]
res3

res4 = H - s*e[3]
res4

c = Matrix(c)
c

f1 = res1*c
f1

f2 = res2*c
f2

f3 = res3*c
f3

f4 = res4*c
f4

r1 = solve(f1, c)
r2 = solve(f2, c)
r3 = solve(f3, c)
r4 = solve(f4, c)

r1[c[0]]

r = [r1, r2, r3, r4]
r

EC = Matrix([f1])
EC = EC.col_insert(1, f2)
EC = EC.col_insert(2, f3)
EC = EC.col_insert(3, f4)
EC

C = Matrix(c)
C

CRcol = C[:, :]
for a in range(1, len(C)+1):
    C_val = solve(EC.col(a-1), C)
    for b in range(1, len(C)+1):
        if sympify('c'+str(b)) in C_val:
            CRcol[b-1] = C_val[sympify('c'+str(b))]
        else:
            CRcol[b-1] = sympify('c'+str(b))
    if a == 1:
        CR = Matrix([CRcol])
    else:
        CR = CR.col_insert(a-1, CRcol)
simplify(CR)

CR.T

FUNC = Matrix(FUNC)
FUNC

PHIS = CR.T*FUNC
PHIS

for i in range(0, 4):
    PHIS[i] = PHIS[i]**2
PHIS

# integrate PHIS with respect to x from 0 to l; then take the result, subtract 1 from term 1,
# solve that for c2, and so on with ...
PHISI = integrate(PHIS, (x, 0, l))
PHISI

c2 = PHISI[0]-1
c2

solve(c2, C[1])

C[1] = solve(c2, C[1])
C[1]

c4 = PHISI[3]-1
c4

solve(c4, C[3])

CRcol = C[:, :]
for a in range(1, len(C)+1):
    C_val = solve(EC.col(a-1), C)
    for b in range(1, len(C)+1):
        if sympify('c'+str(b)) in C_val:
            CRcol[b-1] = C_val[sympify('c'+str(b))]
        else:
            CRcol[b-1] = sympify('c'+str(b))
    if a == 1:
        CR = Matrix([CRcol])
    else:
        CR = CR.col_insert(a-1, CRcol)
simplify(CR)
```
github_jupyter
### Use this example as a base to solve the complete problem.
from sympy import *
from sympy import init_printing; init_printing(use_latex = 'mathjax')

#var('x l m hbar W c')
x, l, m, hbar, W, c = symbols('x l m hbar W c')

f_1 = x*(l - x)
f_2 = (x**2)*((l - x)**2)
f_3 = x*(l - x)*((l/2)-x)
f_4 = (x**2)*((l - x)**2)*((l/2)-x)

FUNC = [f_1, f_2, f_3, f_4]

n = 4
c = []
for i in range(n):
    c.append(Symbol('c_%d' %(i+1)))

c = Matrix([var('c1')])
c = c.row_insert(1, Matrix([var('c2')]))
c = c.row_insert(2, Matrix([var('c3')]))
c = c.row_insert(3, Matrix([var('c4')]))
c

n = 4
U = ones(n,n)
H = zeros(n,n)
s = zeros(n,n)

for i in range(n):
    for j in range(n):
        integrando = (-hbar**2/(2*m))* FUNC[i] *diff(FUNC[j], x, 2)
        H[i,j] = integrate(integrando, (x, 0, l))
        s[i,j] = integrate(FUNC[i]*FUNC[j], (x,0,l))

mult = H - s*W
mult

determinante = mult.det()
determinante

e = solve(determinante, W)
e = Matrix([e])
e

e = e*((l**2*m)/(hbar**2))
e

#sorted(e_lista)
e = Matrix([sorted(e)])
e

e = e*(hbar**2)/(l**2*m)
e

mult = H - s*e[0]
mult

res1 = H - s*e[0]
res1

res2 = H - s*e[1]
res2

res3 = H - s*e[2]
res3

res4 = H - s*e[3]
res4

c = Matrix(c)
c

f1 = res1*c
f1

f2 = res2*c
f2

f3 = res3*c
f3

f4 = res4*c
f4

r1 = solve(f1, c)
r2 = solve(f2, c)
r3 = solve(f3, c)
r4 = solve(f4, c)

r1[c[0]]

r = [r1, r2, r3, r4]
r

EC = Matrix([f1])
EC = EC.col_insert(1, f2)
EC = EC.col_insert(2, f3)
EC = EC.col_insert(3, f4)
EC

C = Matrix(c)
C

CRcol = C[:, :]
for a in range(1, len(C)+1):
    C_val = solve(EC.col(a-1), C)
    for b in range(1, len(C)+1):
        if sympify('c'+str(b)) in C_val:
            CRcol[b-1] = C_val[sympify('c'+str(b))]
        else:
            CRcol[b-1] = sympify('c'+str(b))
    if a == 1:
        CR = Matrix([CRcol])
    else:
        CR = CR.col_insert(a-1, CRcol)
simplify(CR)

CR.T

FUNC = Matrix(FUNC)
FUNC

PHIS = CR.T*FUNC
PHIS

for i in range(0, 4):
    PHIS[i] = PHIS[i]**2
PHIS

# integrate PHIS with respect to x from 0 to l; then take the result, subtract 1 from term 1,
# solve that for c2, and so on with ...
PHISI = integrate(PHIS, (x, 0, l))
PHISI

c2 = PHISI[0]-1
c2

solve(c2, C[1])

C[1] = solve(c2, C[1])
C[1]

c4 = PHISI[3]-1
c4

solve(c4, C[3])

CRcol = C[:, :]
for a in range(1, len(C)+1):
    C_val = solve(EC.col(a-1), C)
    for b in range(1, len(C)+1):
        if sympify('c'+str(b)) in C_val:
            CRcol[b-1] = C_val[sympify('c'+str(b))]
        else:
            CRcol[b-1] = sympify('c'+str(b))
    if a == 1:
        CR = Matrix([CRcol])
    else:
        CR = CR.col_insert(a-1, CRcol)
simplify(CR)
0.129142
0.425486
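The record above works through the linear variational method symbolically. As a compact numerical companion — a sketch only, in units where $\hbar = m = l = 1$ and keeping just the two even trial functions, a deliberate simplification of the four-function basis used above — the same generalized eigenvalue problem $\mathbf{H}\mathbf{c} = W\,\mathbf{S}\mathbf{c}$ can be solved directly and the lowest root compared with the exact particle-in-a-box ground state $\pi^2/2$:

```
import numpy as np
from scipy.linalg import eigh
import sympy as sp

x = sp.symbols('x')
L = 1                      # box length; hbar = m = 1 as well

# two even trial functions taken from the basis in the record above
basis = [x*(L - x), (x**2)*((L - x)**2)]

n = len(basis)
H = np.zeros((n, n))
S = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        # H_ij = <f_i| -1/2 d^2/dx^2 |f_j>,  S_ij = <f_i|f_j> on [0, L]
        H[i, j] = float(sp.integrate(basis[i]*(-sp.Rational(1, 2))*sp.diff(basis[j], x, 2), (x, 0, L)))
        S[i, j] = float(sp.integrate(basis[i]*basis[j], (x, 0, L)))

W = eigh(H, S, eigvals_only=True)   # generalized eigenvalue problem H c = W S c
print('variational ground state:', W[0])
print('exact ground state      :', np.pi**2/2)
```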
Lambda School Data Science

*Unit 2, Sprint 3, Module 1*

---

# Define ML problems

You will use your portfolio project dataset for all assignments this sprint.

## Assignment

Complete these tasks for your project, and document your decisions.

- [ ] Choose your target. Which column in your tabular dataset will you predict?
- [ ] Is your problem regression or classification?
- [ ] How is your target distributed?
    - Classification: How many classes? Are the classes imbalanced?
    - Regression: Is the target right-skewed? If so, you may want to log transform the target.
- [ ] Choose your evaluation metric(s).
    - Classification: Is your majority class frequency >= 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy?
    - Regression: Will you use mean absolute error, root mean squared error, R^2, or other regression metrics?
- [ ] Choose which observations you will use to train, validate, and test your model.
    - Are some observations outliers? Will you exclude them?
    - Will you do a random split or a time-based split?
- [ ] Begin to clean and explore your data.
- [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information?

If you haven't found a dataset yet, do that today. [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2) and choose your dataset.

Some students worry, ***what if my model isn't “good”?*** Then, [produce a detailed tribute to your wrongness. That is science!](https://twitter.com/nathanwpyle/status/1176860147223867393)

```
%%capture
import sys
import pandas as pd

DATA_PATH = '../data/'

df = pd.read_csv(r'C:\Users\Matt\Desktop\unit1project\nba_stats_mod.csv')

df.describe()

df['Appeared_As_All_Star'].value_counts(normalize=True)

df.isnull().sum()

df = df.drop(columns=['Unnamed: 0', 'Unnamed: 0.1'])

import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split

train, val = train_test_split(df, test_size = 0.2, random_state = 42)

target = 'Appeared_As_All_Star'
features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]

features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]

pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    DecisionTreeClassifier(max_depth=3)
)

pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
```
github_jupyter
%%capture
import sys
import pandas as pd

DATA_PATH = '../data/'

df = pd.read_csv(r'C:\Users\Matt\Desktop\unit1project\nba_stats_mod.csv')

df.describe()

df['Appeared_As_All_Star'].value_counts(normalize=True)

df.isnull().sum()

df = df.drop(columns=['Unnamed: 0', 'Unnamed: 0.1'])

import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split

train, val = train_test_split(df, test_size = 0.2, random_state = 42)

target = 'Appeared_As_All_Star'
features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]

features = train.columns.drop([target])
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]

pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    DecisionTreeClassifier(max_depth=3)
)

pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
0.2587
0.938294
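The assignment above points out that accuracy can be misleading when the majority class frequency falls outside the 50–70% range. A small illustrative sketch of that point on a synthetic imbalanced dataset — the class proportions and model settings here are assumptions, not values from the record — compares accuracy with ROC AUC side by side so the effect of the imbalance is visible:

```
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

# hypothetical imbalanced binary target (~90% majority class), standing in for a
# column like 'Appeared_As_All_Star' from the record above
X, y = make_classification(n_samples=5000, n_features=20, weights=[0.9, 0.1], random_state=42)

model = DecisionTreeClassifier(max_depth=3, random_state=42)
print('accuracy:', cross_val_score(model, X, y, cv=5, scoring='accuracy').mean())
print('roc_auc :', cross_val_score(model, X, y, cv=5, scoring='roc_auc').mean())
```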