Dataset columns: repo_name (string, lengths 6-77), path (string, lengths 8-215), license (string, 15 classes), content (string, lengths 335-154k)
sbu-python-summer/python-tutorial
day-5/ngram_models.ipynb
bsd-3-clause
def bigramize(filename): pass """ Explanation: $n$-gram extraction and text generation 1. Bigram extraction Write a function that extracts possible word combinations of length $2$ from the same file, shore_leave.txt. Note that the last word of one sentence and the first word of the next one are not a good combination. Splitting the lines into sentences is easier with the .split() function: its argument can be the separator you intend to use. Define a function bigramize(filename) that will take the name of the file as input and return the list of bigrams. Open the file shore_leave.txt. Create a list that contains the possible bigrams of the sentences of this text. Do not join the words; we will need them for the next exercises. End of explanation """ bigrams = bigramize("shore_leave.txt") """ Explanation: Test your function: End of explanation """ def ngramize(filename, n): pass """ Explanation: 2. N-gram extraction Generalize the function from the previous exercise from bigrams (sequences of length $2$) to $n$-grams. Define a function ngramize(filename, n), where $n$ is the length of the sequences that need to be extracted. End of explanation """ ngrams = ngramize("shore_leave.txt", 3) """ Explanation: Test your function. End of explanation """ def generate(bigrams, word=None, maxlen=20): pass """ Explanation: 3. Bigram-based text generation Write a function that will generate text based on the list of bigrams. Define a function generate(bigrams, word=None, maxlen=20), where bigrams is a list of bigrams, word is the first word in the generated sentence, and maxlen is the maximum length of the resulting sentence. If the initial word is either not provided or never occurs non-finally in the text (i.e., it has no continuation), randomly replace it with any available word. Generate a sequence of length maxlen when possible. If some word has no continuation, just return the current sequence. End of explanation """ print(generate(bigrams)) print(generate(bigrams, "flowers")) # shouldn't rewrite the word print(generate(bigrams, "sequential", 10)) # should rewrite the word """ Explanation: Test your function. End of explanation """
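A minimal sketch of one possible solution to these exercises (not the official solution), assuming sentences in shore_leave.txt are separated by periods; the actual separator passed to .split() may differ:

import random

def ngramize(filename, n):
    """Return a list of n-grams (tuples of words) that never cross sentence boundaries."""
    ngrams = []
    with open(filename) as f:
        text = f.read()
    for sentence in text.split("."):  # assumed sentence separator
        words = sentence.split()
        ngrams += [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    return ngrams

def bigramize(filename):
    return ngramize(filename, 2)

def generate(bigrams, word=None, maxlen=20):
    starts = [a for a, b in bigrams]
    if word is None or word not in starts:  # missing or unusable start word: pick any
        word = random.choice(starts)
    sequence = [word]
    while len(sequence) < maxlen:
        continuations = [b for a, b in bigrams if a == sequence[-1]]
        if not continuations:  # no continuation available: stop early
            break
        sequence.append(random.choice(continuations))
    return sequence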
ayejay/reading-habits
notebook/Pocket Reading Habits.ipynb
mit
import json import glob import pandas as pd import datetime import requests import matplotlib.pyplot as plt import numpy as np from wordcloud import WordCloud from urllib.parse import urlparse """ Explanation: # Introduction This notebook looks at patterns in my reading habits. <a href="https://getpocket.com">Pocket</a> is a handy tool to keep track of all the nice articles you might miss because of an "important" meeting. I have used the data provided by Pocket's API. I wanted to play more with the data at hand, but it is better to start with simple things. In the future, I might try to discover some more insights. Let's start with the basics for now. End of explanation """ consumer_key = "" # Consumer key required for Pocket API access_token = "" # Access Token required for Pocket API time_added_limit = 1483228800 # Since when do you want to fetch the data, default 1 Jan 2017 if consumer_key == "" or access_token == "": raise ValueError("Please generate Consumer Key and Access Token.") """ Explanation: First, we need to get data from Pocket through its API. For that, we first need to create the <a href="https://getpocket.com/developer/apps/new">consumer key</a>. The access token can then be generated using the API or <a href="https://reader.fxneumann.de/plugins/oneclickpocket/auth.php">fxneumann's OneClickPocket</a>. End of explanation """ data_points = { "item_id" : "int64", "resolved_title" : "object", "resolved_url" : "object", "time_added" : "int64", "time_read": "int64", "excerpt" : "object", "word_count" : "int32", "is_article": "int32", "status" : "int32" } rawReq = { "consumer_key" : consumer_key, "access_token" : access_token, "sort" : "newest", "state" : "all", "detailType" : "complete" } url = "https://getpocket.com/v3/get" headers = {"Content-Type": "application/json"} counter = 0 items_per_request = 500 df = pd.DataFrame([]) while True: req = rawReq.copy() req.update({"offset": counter*items_per_request, "count": items_per_request}) r = requests.post(url, data=json.dumps(req), headers=headers) rjson = r.json() all_items_found = False if (len(rjson['list']) == 0): break for item in rjson["list"].values(): if (int(item["time_added"]) < time_added_limit): all_items_found = True try: data_arr = pd.DataFrame([[item[x] for x in data_points.keys()]]) except KeyError: continue #Ignoring ill-formed data df = df.append(data_arr) if all_items_found: break counter += 1 df.columns = data_points.keys() for col in df: df[col] = df[col].astype(data_points.get(col)) """ Explanation: It is good practice to limit the number of API requests and the amount of data you fetch, so specifying a reasonable time limit is appreciated. Now, time to fetch the numbers. End of explanation """ number_of_top_websites = 10 # Different subdomains are considered as different entities for the sake of simplicity group_by_domain = df["resolved_url"].apply(lambda x:urlparse(x).netloc) top_domains = group_by_domain.value_counts().head(number_of_top_websites) _, ax = plt.subplots(figsize=(12, 10)) y_pos = np.arange(10, 0, -1) ax.set_yticks(y_pos) ax.set_yticklabels(top_domains.index.values) ax.barh(y_pos, top_domains, color="green") ax.set_title("Top Websites for reading") ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.show() """ Explanation: First, I would like to see which websites I visit most when reading the saved articles. 
End of explanation """ def get_day_and_hour(ts): dt = datetime.datetime.fromtimestamp(ts) return (dt.date().weekday(), dt.hour) def get_weekday_hour_heatmap_data(ts_series): dfa = ts_series.apply(lambda x : pd.Series(get_day_and_hour(x), index=["a", "b"])) dfg = dfa.groupby(["a", "b"]).size().reset_index(name='count') days_hmp = np.zeros((7, 24)) for i in range(7): for j in range(24): if len(dfg[(dfg["a"] == i) & (dfg["b"] == j)]) > 0: days_hmp[i, j] = dfg[(dfg["a"] == i) & (dfg["b"] == j)]["count"].values[0] return days_hmp def plot_weekday_hour_heatmap(days_hmp, ax, vmin, vmax, title): ax.matshow(days_hmp, cmap='summer', vmin=vmin, vmax=vmax) ax.set_xticks(np.arange(0, 24, 1)) ax.set_yticks(np.arange(0, 7, 1)) ax.set_yticklabels(["M", "T", "W", "T", "F", "S", "S"]) ax.set_xticklabels(np.arange(0, 24, 1)) ax.set_xticks(np.arange(-.5, 23, 1), minor=True) ax.set_yticks(np.arange(-.5, 6, 1), minor=True) ax.set_title(title) ax.grid(which="minor", linestyle="-", color='black', linewidth=1) fig, [ax1, ax2] = plt.subplots(nrows=2, figsize=(14, 10)) plot_weekday_hour_heatmap(get_weekday_hour_heatmap_data(df["time_added"]), ax1, 10, 200, "Add") plot_weekday_hour_heatmap(get_weekday_hour_heatmap_data(df[df["status"]!=0]["time_read"]), ax2, 0, 150, "Read") plt.tight_layout() plt.show() """ Explanation: No surprises there. Next, I want to know at what time of the week I am most active in saving/reading Pocket articles. Let's build a cool heatmap to find that out. End of explanation """ df[df["status"] != 0]["word_count"].sum() """ Explanation: No surprises again!! I usually save articles when travelling to the office during the weekdays. As for reading the saved articles, there is no general pattern as reading is spread across the week. Let's go for the number of words that I might have read in the given time period. End of explanation """ len(df[(df["status"] != 0) & (df["is_article"] == 1)]) """ Explanation: What about the number of articles (excluding the videos and other stuff)? End of explanation """ read_wordcloud = WordCloud( max_font_size=50, \ background_color='white', width=800, \ height=400).generate(df[df["status"] != 0]["excerpt"].sum()) _, ax = plt.subplots(figsize=(12, 10)) ax.imshow(read_wordcloud, interpolation="bilinear") plt.axis("off") plt.tight_layout() plt.show() """ Explanation: Oh!! Let's end this thing with a nice word cloud. I should get a printed T-Shirt with this cool word cloud. End of explanation """
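As a side note on the time_added_limit value used above (1483228800, i.e., 1 Jan 2017 UTC), a small hypothetical helper like the following, not part of the original notebook, can compute the epoch value for any other cutoff date; it reuses the datetime import already present:

def to_epoch(year, month, day):
    # Interpret the cutoff as UTC midnight and return seconds since the Unix epoch.
    dt = datetime.datetime(year, month, day, tzinfo=datetime.timezone.utc)
    return int(dt.timestamp())

print(to_epoch(2017, 1, 1))  # 1483228800, the default used above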
quantumlib/Cirq
docs/tutorials/google/floquet_calibration_example.ipynb
apache-2.0
try: import cirq except ImportError: print("installing cirq...") !pip install --quiet cirq --pre print("installed cirq.") from typing import Iterable, List, Optional, Sequence import matplotlib.pyplot as plt import numpy as np import os import cirq import cirq_google as cg # Contains the Floquet calibration tools. """ Explanation: Floquet calibration: Example and benchmark <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/cirq/tutorials/google/floquet_calibration_example"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/tutorials/google/floquet_calibration_example.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/tutorials/google/floquet_calibration_example.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/tutorials/google/floquet_calibration_example.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> This tutorial shows a detailed example and benchmark of Floquet calibration, a calibration technique introduced in the Calibration: Overview and API tutorial. Setup Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via pip install cirq --pre. End of explanation """ # The Google Cloud Project id to use. project_id = '' #@param {type:"string"} processor_id = "" #@param {type:"string"} from cirq_google.engine.qcs_notebook import get_qcs_objects_for_notebook device_sampler = get_qcs_objects_for_notebook(project_id, processor_id) line_length = 20 if device_sampler.is_simulator else 35 """ Explanation: Note: Leave the project_id and/or processor_id blank to use a noisy simulator. End of explanation """ line = cg.line_on_device(device_sampler.device, line_length) print(line) """ Explanation: Defining the circuit We run Floquet calibration on a circuit which models the evolution of a single fermionic particle on 5 sites, realizing the Hamiltonian: $$ H=\sum_{m=0}^{L-1} J(\sigma_{m}^{+} \sigma_{m+1}^{-} + \sigma_{m}^{-} \sigma_{m+1}^{+}), $$ where $\sigma_{m}^{+}$ ($\sigma_{m}^{-}$) are the raising (lowering) operators, and the two terms describe the kinetic energy related to hopping between neighbouring sites. This Quirk circuit shows the evolution of the charge density. This simulation can be looked at as a highly simplified version of the paper from our group, Observation of separated dynamics of charge and spin in the Fermi-Hubbard model. We model only a single fermion in the non-interacting case (with $U=0$). For a single particle, the parasitic controlled phase does not impact the evolution, and we can use a single chain, which one can think of as being in either the up or the down spin state. The parameter $\theta$ for $K(\theta)$ is fixed to $\pi/4$. To smooth out the inhomogeneities of the quantum chip, we are using the technique of averaging over multiple qubit configurations from this paper. 
The difference is that we pick a line that we segment and run the same circuit in parallel on each segment corresponding to a different qubit configuration. We measure the charge density at each site index (qubit) by averaging the Z densities (see Fig 2.a for comparison). The physics of this problem for a closed chain (here we use an open chain) has been studied in Accurately computing electronic properties of materials using eigenenergies as well without the complex hopping term, hence we use no Z rotations between the $\sqrt{\text{iSWAP}}$ gates. This paper also describes the Floquet calibration fundamentals in Appendix A. First we use the function cirq_google.line_on_device to return a line of qubits of a specified length. End of explanation """ segment_length = 5 segments = [line[i: i + segment_length] for i in range(0, line_length - segment_length + 1, segment_length)] """ Explanation: This line is now broken up into a number of segments of a specified length (number of qubits). End of explanation """ print(*segments[0]) """ Explanation: For example, the first segment consists of the following qubits. End of explanation """ sqrt_iswap = cirq.ISWAP ** 0.5 def create_linear_chain_segment( segment: Sequence[cirq.Qid], num_trotter_steps: int, ) -> cirq.Circuit: """Returns a linear chain circuit on one segment.""" circuit = cirq.Circuit(cirq.X.on(segment[len(segment) // 2])) # Trotter steps. for step in range(num_trotter_steps): offset = step % 2 circuit += cirq.Moment( [sqrt_iswap.on(a, b) for a, b in zip(segment[offset::2], segment[offset + 1::2])]) return circuit def create_linear_chain_circuit( segments: Sequence[Sequence[cirq.Qid]], num_trotter_steps: int, ) -> cirq.Circuit: """Returns a linear chain circuit to demonstrate Floquet calibration on.""" circuit_segments = [create_linear_chain_segment(segment, num_trotter_steps) for segment in segments] circuit = cirq.Circuit.zip(*circuit_segments) return circuit + cirq.measure(*sum(segments, ()), key='z') """ Explanation: We now implement a number of Trotter steps on each segment in parallel. The middle qubit on each segment is put into the $|1\rangle$ state, then each Trotter step consists of staggered $\sqrt{\text{iSWAP}}$ gates. All qubits are measured in the $Z$ basis at the end of the circuit. For convenience, this code is wrapped in a function. End of explanation """ """Example of the linear chain circuit on one segment of the line.""" num_trotter_steps = 20 circuit_on_segment = create_linear_chain_circuit( segments=[segments[0]], num_trotter_steps=num_trotter_steps, ) print(circuit_on_segment.to_text_diagram(qubit_order=segments[0])) """ Explanation: As an example, we show this circuit on the first segment of the line from above. End of explanation """ """Circuit used to demonstrate Floquet calibration.""" circuit = create_linear_chain_circuit( segments=segments, num_trotter_steps=num_trotter_steps ) """ Explanation: The circuit we will use for Floquet calibration is this same pattern repeated on all segments of the line. End of explanation """ """Simulate one segment on a simulator.""" nreps = 20_000 sim_result = cirq.Simulator().run(circuit_on_segment, repetitions=nreps) """ Explanation: Execution on a simulator To establish a "ground truth," we first simulate a segment on a noiseless simulator. 
End of explanation """ """Execute the full circuit on a processor without Floquet calibration.""" raw_results = device_sampler.sampler.run(circuit, repetitions=nreps) """ Explanation: Execution on the processor without Floquet calibration We now execute the full circuit on a processor without using Floquet calibration. End of explanation """ def z_density_from_measurements( measurements: np.ndarray, post_select_filling: Optional[int] = 1 ) -> np.ndarray: """Returns density for one segment on the line.""" counts = np.sum(measurements, axis=1, dtype=int) if post_select_filling is not None: errors = np.abs(counts - post_select_filling) counts = measurements[errors == 0] return np.average(counts, axis=0) def z_densities_from_result( result: cirq.Result, segments: Iterable[Sequence[cirq.Qid]], post_select_filling: Optional[int] = 1 ) -> List[np.ndarray]: """Returns densities for each segment on the line.""" measurements = result.measurements['z'] z_densities = [] offset = 0 for segment in segments: z_densities.append(z_density_from_measurements( measurements[:, offset: offset + len(segment)], post_select_filling) ) offset += len(segment) return z_densities """ Explanation: Comparing raw results to simulator results For comparison we will plot densities (average measurement results) on each segment. Such densities are in the interval $[0, 1]$ and more accurate results are closer to the simulator results. To visualize results, we define a few helper functions. Helper functions Note: The functions in this section are just utilities for visualizing results and not essential for Floquet calibration. As such this section can be safely skipped or skimmed. The next cell defines two functions for returning the density (average measurement results) on a segment or on all segments. We can optionally post-select for measurements with a specific filling (particle number) - i.e., discard measurement results which don't obey this expected particle number. End of explanation """ #@title def plot_density( ax: plt.Axes, sim_density: np.ndarray, raw_density: np.ndarray, cal_density: Optional[np.ndarray] = None, raw_errors: Optional[np.ndarray] = None, cal_errors: Optional[np.ndarray] = None, title: Optional[str] = None, show_legend: bool = True, show_ylabel: bool = True, ) -> None: """Plots the density of a single segment for simulated, raw, and calibrated results. """ colors = ["grey", "orange", "green"] alphas = [0.5, 0.8, 0.8] labels = ["sim", "raw", "cal"] # Plot densities. for i, density in enumerate([sim_density, raw_density, cal_density]): if density is not None: ax.plot( range(len(density)), density, "-o" if i == 0 else "o", markersize=11, color=colors[i], alpha=alphas[i], label=labels[i] ) # Plot errors if provided. errors = [raw_errors, cal_errors] densities = [raw_density, cal_density] for i, (errs, dens) in enumerate(zip(errors, densities)): if errs is not None: ax.errorbar( range(len(errs)), dens, errs, linestyle='', color=colors[i + 1], capsize=8, elinewidth=2, markeredgewidth=2 ) # Titles, axes, and legend. ax.set_xticks(list(range(len(sim_density)))) ax.set_xlabel("Qubit index in segment") if show_ylabel: ax.set_ylabel("Density") if title: ax.set_title(title) if show_legend: ax.legend() def plot_densities( sim_density: np.ndarray, raw_densities: Sequence[np.ndarray], cal_densities: Optional[Sequence[np.ndarray]] = None, rows: int = 3 ) -> None: """Plots densities for simulated, raw, and calibrated results on all segments. 
""" if not cal_densities: cal_densities = [None] * len(raw_densities) cols = (len(raw_densities) + rows - 1) // rows fig, axes = plt.subplots( rows, cols, figsize=(cols * 4, rows * 3.5), sharey=True ) if rows == 1 and cols == 1: axes = [axes] elif rows > 1 and cols > 1: axes = [axes[row, col] for row in range(rows) for col in range(cols)] for i, (ax, raw, cal) in enumerate(zip(axes, raw_densities, cal_densities)): plot_density( ax, sim_density, raw, cal, title=f"Segment {i + 1}", show_legend=False, show_ylabel=i % cols == 0 ) # Common legend for all subplots. handles, labels = ax.get_legend_handles_labels() fig.legend(handles, labels) plt.tight_layout(pad=0.1, w_pad=1.0, h_pad=3.0) """ Explanation: Now we define functions to plot the densities for the simulator, processor without Floquet calibration, and processor with Floquet calibration (which we will use at the end of this notebook). The first function is for a single segment, and the second function is for all segments. End of explanation """ """Extract densities from measurement results.""" # Simulator density. sim_density, = z_densities_from_result(sim_result,[circuit_on_segment]) # Processor densities without Floquet calibration. raw_densities = z_densities_from_result(raw_results, segments) """ Explanation: Visualizing results Note: This section uses helper functions from the previous section to plot results. The code can be safely skimmed: emphasis should be on the plots. To visualize results, we first extract densities from the measurements. End of explanation """ plot_densities(sim_density, raw_densities, rows=int(np.sqrt(line_length / segment_length))) """ Explanation: We first plot the densities on each segment. Note that the simulator densities ("sim") are repeated on each segment and the lines connecting them are just visual guides. End of explanation """ """Plot mean density and variance over segments.""" raw_avg = np.average(raw_densities, axis=0) raw_std = np.std(raw_densities, axis=0, ddof=1) plot_density( plt.gca(), sim_density, raw_density=raw_avg, raw_errors=raw_std, title="Average over segments" ) """ Explanation: We can also look at the average and variance over the segments. End of explanation """ # (calibrated_circuit, calibrations # ) = cg.run_zeta_chi_gamma_compensation_for_moments( # circuit, # device_sampler.sampler, # ) """ Explanation: In the next section, we will use Floquet calibration to produce better average results. After running the circuit with Floquet calibration, we will use these same visualizations to compare results. Execution on the processor with Floquet calibration There are two equivalent ways to use Floquet calibration which we outline below. A rough estimate for the time required for Floquet calibration is about 16 seconds per 10 qubits, plus 30 seconds of overhead, per calibrated moment. Simple usage The first way to use Floquet calibration is via the single function call used at the start of this notebook. Here, we describe the remaining returned values in addition to calibrated_circuit. Note: We comment out this section so Floquet calibration is only executed once in the notebook. 
End of explanation """ """Step 1: Find moments in the circuit that need to be characterized.""" (characterized_circuit, characterization_requests ) = cg.prepare_characterization_for_moments( circuit, options=cg.FloquetPhasedFSimCalibrationOptions( characterize_theta=False, characterize_zeta=True, characterize_chi=False, characterize_gamma=True, characterize_phi=False ) ) """ Explanation: The returned calibrated_circuit.circuit can then be run on the engine. The full list of returned arguments is as follows: calibrated_circuit.circuit: The input circuit with added $Z$ rotations around each $\sqrt{\text{iSWAP}}$ gate to compensate for errors. calibrated_circuit.moment_to_calibration: Provides an index of the matching characterization (index in calibrations list) for each moment of the calibrated_circuit.circuit, or None if the moment was not characterized (e.g., for a measurement outcome). calibrations: List of characterization results for each characterized moment. Each characterization contains angles for each qubit pair. Step-by-step usage Note: This section is provided to see the Floquet calibration API at a lower level, but the results are identical to the "simple usage" in the previous section. The above function cirq_google.run_zeta_chi_gamma_compensation_for_moments performs the following three steps: Find moments within the circuit that need to be characterized. Characterize them on the engine. Apply corrections to the original circuit. To find moments that need to be characterized, we can do the following. End of explanation """ """Show an example characterization request.""" print(f"Total {len(characterization_requests)} moment(s) to characterize.") print("\nExample request") request = characterization_requests[0] print("Gate:", request.gate) print("Qubit pairs:", request.pairs) print("Options: ", request.options) """ Explanation: The characterization_requests contain information on the operations (gate + qubit pairs) to characterize. End of explanation """ """Step 2: Characterize moments on the engine.""" characterizations = cg.run_calibrations( characterization_requests, device_sampler.sampler, max_layers_per_request=1, ) """ Explanation: We now characterize them on the engine using cirq_google.run_calibrations. End of explanation """ print(f"Total: {len(characterizations)} characterizations.") print() (pair, parameters), *_ = characterizations[0].parameters.items() print(f"Example pair: {pair}") print(f"Example parameters: {parameters}") """ Explanation: The characterizations store characterization results for each pair in each moment, for example. End of explanation """ """Step 3: Apply corrections to the circuit to get a calibrated circuit.""" calibrated_circuit = cg.make_zeta_chi_gamma_compensation_for_moments( characterized_circuit, characterizations ) """ Explanation: Finally, we apply corrections to the original circuit. End of explanation """ print("Portion of calibrated circuit:") print("\n".join( calibrated_circuit.circuit.to_text_diagram(qubit_order=line).splitlines()[:9] + ["..."])) """ Explanation: The calibrated circuit can now be run on the processor. We first inspect the calibrated circuit to compare to the original. End of explanation """ """Run the calibrated circuit on the engine.""" cal_results = device_sampler.sampler.run(calibrated_circuit.circuit, repetitions=nreps) """ Explanation: Note again that $\sqrt{\text{iSWAP}}$ gates are padded by $Z$ phases to compensate for errors. We now run this calibrated circuit. 
End of explanation """ """Extract densities from measurement results.""" cal_densities = z_densities_from_result(cal_results, segments) """ Explanation: Comparing raw results to calibrated results We now compare results with and without Floquet calibration, again using the simulator results as a baseline for comparison. First we extract the calibrated densities. End of explanation """ plot_densities( sim_density, raw_densities, cal_densities, rows=int(np.sqrt(line_length / segment_length)) ) """ Explanation: Now we reproduce the same density plots from above on each segment, this time including the calibrated ("cal") results. End of explanation """ """Plot mean density and variance over segments.""" raw_avg = np.average(raw_densities, axis=0) raw_std = np.std(raw_densities, axis=0, ddof=1) cal_avg = np.average(cal_densities, axis=0) cal_std = np.std(cal_densities, axis=0, ddof=1) plot_density( plt.gca(), sim_density, raw_avg, cal_avg, raw_std, cal_std, title="Average over segments" ) """ Explanation: We also visualize the mean and variance of results over segments as before. End of explanation """ """Plot errors of raw vs calibrated results.""" fig, axes = plt.subplots(ncols=2, figsize=(15, 4)) axes[0].set_title("Error of the mean") axes[0].set_ylabel("Density") axes[1].set_title("Data standard deviation") colors = ["orange", "green"] labels = ["raw", "cal"] for index, density in enumerate([raw_densities, cal_densities]): color = colors[index] label = labels[index] average_density = np.average(density, axis=0) sites = list(range(len(average_density))) error = np.abs(average_density - sim_density) std_dev = np.std(density, axis=0, ddof=1) axes[0].plot(sites, error, color=color, alpha=0.6) axes[0].scatter(sites, error, color=color) axes[1].plot(sites, std_dev, label=label, color=color, alpha=0.6) axes[1].scatter(sites, std_dev, color=color) for ax in axes: ax.set_xticks(sites) ax.set_xlabel("Qubit index in segment") plt.legend(); """ Explanation: Last, we can look at density errors between raw/calibrated results and simulated results. End of explanation """
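A small aside on the $\sqrt{\text{iSWAP}}$ gate that implements each hopping step in the Trotterized circuit above; this standalone snippet (not part of the tutorial) just prints its unitary so the partial mixing of the $|01\rangle$ and $|10\rangle$ amplitudes is visible:

import numpy as np
import cirq

sqrt_iswap = cirq.ISWAP ** 0.5  # the two-qubit gate applied in every Trotter step
# The |01> and |10> amplitudes are mixed with weight 1/sqrt(2); this partial
# "hop" is what realizes the kinetic (hopping) term of the Hamiltonian.
print(np.round(cirq.unitary(sqrt_iswap), 3))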
tornadozou/tensorflow
tensorflow/tools/docker/notebooks/3_mnist_from_scratch.ipynb
apache-2.0
from __future__ import print_function from IPython.display import Image import base64 Image(data=base64.decodestring("iVBORw0KGgoAAAANSUhEUgAAAMYAAABFCAYAAAARv5krAAAYl0lEQVR4Ae3dV4wc1bYG4D3YYJucc8455yCSSIYrBAi4EjriAZHECyAk3rAID1gCIXGRgIvASIQr8UTmgDA5imByPpicTcYGY+yrbx+tOUWpu2e6u7qnZ7qXVFPVVbv2Xutfce+q7hlasmTJktSAXrnn8vR/3/xXmnnadg1aTfxL3/7rwfSPmT+kf/7vf098YRtK+FnaZaf/SS++OjNNathufF9caiT2v/xxqbTGki/SXyM1nODXv/r8+7Tb+r+lnxZNcEFHEG/e3LnpoINXSh/PWzxCy/F9eWjOnDlLrr/++jR16tQakgylqdOWTZOGFqX5C/5IjXNLjdt7/NTvv/+eTjnllLT//vunr776Kl100UVpueWWq8n10lOmpSmTU5o/f0Fa3DDH1ry9p0/++eefaZ999slYYPS0005LK664Yk2eJ02ekqZNnZx+XzA/LfprYgGxePHitOqqq6YZM2akyfPmzUvXXXddHceoic2EOckxDj300CzPggUL0g033NC3OKy00krDer3pppv6FgcBIjvGUkv9u5paZZVVhoHpl4Mvv/wyhfxDQ0NZ7H7EQbacPHny39Tejzj88ccfacqUKRmHEecYf0Nr8GGAQJ8gMHCMPlH0QMzmEBg4RnN4DVr3CQIDx+gTRQ/EbA6BgWM0h9egdZ8g8PeliD4RutfF/Ouvfz9OtZy8aNGiNH/+/GGWl1122XzseYuVNKtqsaI23Ghw0DYCA8doG8JqO+AUG2+8cVq4cGHaY4890vLLL5/WXXfdfI6jvPDCC3lJ8amnnkoezP3000/pl19+GThHtWpIPekYomTxFS7HnkqKjMsss0yGgFE4r62tSBFVJ02aNPyconi9V4/JwzHwT9ZNNtkkeZ6w5ZZbph133DH99ttv6ccff8zXX3nllcRRnHNfv2cNGMQWGRaOrWbUrjsGBRLAA6U4Lhoqw9h2223ztRBq6aWXzsbgvueffz4Lu9NOO2UnYTgrr7xy7tO9nOH111/Pbb744ov0ww8/jAvngAdFMvQDDjggG/0GG2yQX1GZNm1aziCCwzrrrJPl3muvvXKwePnll9M333wzHDCKWPbLMbuAkfISjnvvvXcW/emnn85lqCBqa4a65hiYR/Gk2RNGRlwm3n7ggQfmdrKD9sqJtdZaKxvCnDlz8n3Tp09PXmPYeuutc0SVNQjvnmuvvTa3efzxx9N33303PGZ5rF75DBvvqq233nrp22+/TWeddVbyikpgxCE4vQDhlQUBRfDw2esbs2fPTquvvnqviNN1PuIdJ4GErVx44YUZowsuuCB9+umn6eeff84BspmsWqljhPFDxjGGYx/lDkN33udajCoVlAjRzl4U8LjefRwnPjsXG8OJqKBd8NB1LTU5IHyCd7LJGOYXNoGjFqaGIKtrERDIDKtukfGMH/zRZa1A101+YBF44KfMYzO8VOYYjDWiukiGqc022yyXOUqdzTffPJ/z1ialeqNVxA9gi0wzlOJ5juJlR8JeddVV+ZrIKTq4ZvJp/8EHH+SU+txzz+W2SqmxVFZRplrH5DTRXmGFFdKuu+6azjjjjOzosl5g6D54CQCI4mGjhNQO5occckh2LvLTA6fqJOEnyhU6kNlkZmUuvrtNcFx77bUzhsZWXgoSsm6t4Dsa/tp2DErCmA04HAI4FLjaaqtlBhmnSKiNY4rDtHZFB6jFMMH0RVDH+nCPYxtDCFJnKkniRbDitWjTK3sykQUuMLPn3DZGX8SFnCG/fVyz5zCCBtIHTLshdzif8fERn8cKXxjCNOwCTu3Qf6yqhV4AQokiP489//zzM0DxnQYKwqAtIkko1kQzFFxvaNcJ6u3Pe+65J/cRRvDee+9lA2BInIyRff/997nNO++8k7t0vl2A6vHWynmyiPJ43WKLLbIijz/++LTddtvlTCdzwIWSg9yjxBJ0GN/DDz+c7zv77LOzbEceeWSekwVGgsOsWbNyNo0+qt7DfPvtt8/dmtvIGnPnzk3PPPPMsJ6rHrNef/BBeJA90RprrJEDcNhctMkXR/mnbccwuCjNGTbaaKMc8TBZprITxOdgOvbuKxqGz6LSJ598kseJ9Gi1CYmSv/76a3YyJZWMZJ6Ceskp8EMusihFEAyUmVaa8G2rxTNHIrd733///eH7YeaLNe5xrEzlWNF/HqQDf0Tm+GIbvYdD43MsKAIo/JDgE0G5aFfN8NaWYxiUshikqGYTTUSt0TCkjXsYNqJQQso+rgGa0vX58ccf56hQTtk+48F92rmvlnE1A0on2uKP0Yrw+Nxzzz0zn+ZhjKwRXq6vueaa2TmUiRQfS7SyNeMks9IV9vrvJOl/q622yo4Mfw5Pvm6TMclLdit6shh+YAMnq1E29tEsteUYBgMSgxa5MOAzJZcVXQs4bUR8XxhCHIwzMALCBuCcx5q0tF3u133l8XrRMchFiRYNyMxBKM/5IjZlWVzjULKwACISytIWFsi56aab5mvOKyEikmdAO/iHY+BDCRUZuoPD1e1akECyLseA7d13352DhdKak8Cmlt3U7TSl9p58FwejYK8ncAwKpDTnGDcARbWiAUjHiNEHsITSPlagpEZChcfrZzwSOfBOiQwXLuR3PjAhtwAD08iAMCO/a+5xPTIm3ALjwERf0V+c69QeT7ZujVdLDhgKBrANXAMreMESRkU7rdVPrXNtZ4xIpSLH1VdfnR3j4IMPzkbw2Wefpa+//jovo5188slZsZjArAcvFP3YY4+lSy+9NEdTdTTy0I5xHHfccfm1CH2LtuORKEqmkwVlVU+sBY+IdJRmE0zeeOONnEXuu+++7AhnnnlmWn/99XMJ5brtzTffzHMJx/o555xzkgdb0U8rRtAKrnTYqtG1Ml6teyxInHDCCdlGYByBmG2Z97ChVvFo2zEwbHCRTbqP7EDxPjN2pUBEe86AXAcsg+f10TYMSTvnRM1ulQe1wG/nHEXZZEJZUIYQ5cgWMsEgMgqclFdkdh+MbFFyuddnWMLNfTYkcuuXHlBkpFYNI3dS+mMMfCHHsZWadfUjmQVn8iLywscG21apMscQwR555JEM3KuvvpoZ5LHOmzgjAvBwzFt2/Oijj3Lm4Ayin/MU/eGHH+b2N998c/5MGSaZ44nw7OEd5Rx77LE5+1EehYXxkpes5li2K6+8Mhv8Lrvsko381ltvzcEBfvHQKh5auk9GPvHEE3NJAx+/eKL/HXbYIQcbK3nwN067xAk4s5VHdbvsx0nxrYQeKxJMZAfBA7GlRx99NC9EtCN7JY4RoPBeAHIAyrB3jpHYwqu1d02d7HpZcfqINo5dL7eJMXtxTzk2sgWFM/gcsnCakI2cFOk+523O+Qw7WaeYHYpYRp9xn4BkbPdWSfgJXYYM+ne+2
xRj2sdx8EDu8rm4Ntp9pY4RSmb0CIPOAVNGoLA47yU4S2xen37ppZdy9CkLE/3lm8bJHzJbbiavt2Q9p7AkK7oyXAZOLk7gs9c4PJC0AOE8DDyrgJkaWgYQkSPYuAdpWySfteU8HhqKouYq+io6ZfGeZo7xpbT1+jt+jGULfprpq922ePHMBibwjWVq523KVrzBsIzTaMeu1DFi0HI0YyyYtAekY5MltbRyihFJiROBKIYTwMCTWJNubwdQFCXFapK9z96mtbjgs3thFKWnUgjBzNZIya5FOyUcPG36q4LwRgZ6Ix8HtBk3tirGGU0feAkslHfk5PzBh2cXSkvtWqWOOEaRGcoSHdXDMoYn1tK8yaON0ahbCWgFS/vxSnjn5F4ItLeiFAGAzCKc7MDA1OlIjc4pLFKE7FEyxb5ZPNTbtuiv2fvrtddfOFsYXcwj8d8qv/XGq3femLvvvnvOvrIYPPEjG+PDseDbDnXcMXiyiGiyyACOPvrovN95552zV3/++ef5zVveznlEo6CICvG5l/d4JSvHP+qoo7JjKDs4PkVSGPm9HSz9W5rlPEoCQYHjVFXyRGnBOcKA28VOP/qTBWX6YnS2IKB8qYL/enyGHPbKziOOOCLj6sGeslGW8L6Y4ANr2MY99fpsdL7jjmFwkSTSr6gDVCk+tmDQedcJ5LgdwaLPbu7xjJRRNlErSsiQhVHJlOEQoh182o1wRTnharwYs3itnWP9Rd/RD5mLW5yveh/YRhYMjItyBh/wjPat8tEVx6B00RKo5513XpIl7rzzzuwEourMmTOz95uIcyBfTSXYiy++mCOrSFS1klsFrNZ9eGPoJtmeyRx00EE5cpGbIi21XnbZZbkMee2117KMHIKMIVcotVb/vXoOz6I0+URoMlVFcBFE7L1+IjNYIo6v/fo+D3tC+FCR+FHuwNUCgfOtUlccI5hnJMoIBhN1sBICqMoNNaLP3pkiFGciIIBC4HaEbRWk0dyHb3Mp/EY0I6+NsytvyKxsKhpQr8ozGpm1IZ8IbV+PyllGuyh1YBXXOQEcy6R8M5eAHzuxxX3GRvbaCKJ4aRfXrjkG5jEbk00Prxi8SZTJKmc5/PDDc5v99tsvC+hBjWtqStmD0F4Ma1foMvDtfqZMUc3/lYjMSFFW3NS7JtyyoKzSiTocHoFJHMc+MlK7Mta7n9NbATJerbEYvQWIWCVitIyaXrV3nsG7H2Y2GVcbxyj6NX+waKEPmOvbfShwtjhQDDz5Ygt/uuoY+OPtnICDEMBTWsAQUu0NBBsDEgFEWOADAiDaVRERWsCq5i34IRN+TbTJgn8KwzOFuR4KDUXW7Kyik53Ep8w/+RkxWeO5S1EM5wVABguXMGp69dk1x87D0ObdL32GHI5tsDQGHtwbm/Hw4TpnKvNY5Ge0x113DEwT3tIsIdSnDIfxcxJAevCHfE9cXcmotHXfAw88kIFUdgFjLMn4HuZRuh9FExmjRCCnZxRqcPxz8ioUVk9eRhJkPAYHV8ZVFRkjjFSfAtw222yTy2OZ0iv15fHcQ4dKaMcwsBdEEL26RzaIh5+yK7LSBGPno8yOZX+vzRhfXzZ8cRrtyzzkzpr803XHwB8wTJYIRol+VY8zqMMBbP0f+cExE1qTdbU7x3jwwQdzVBYdesExKNiEWx2MfwoOAyCbJ9uRHZvUTcPmsENhGNE4HBKOHKNqZzQu3KNfX9H1nRABQZlbNkpt4SNo4DWIIesDj9qYnwki2giWqol3330348kZLPm7xvi1Pffcc7MzhA3gy/0oeIuxWtmPiWNgNCIFYwcCAa2FA1ikJZz1aeUVsBmge9TyoqGoIqKUFdEKCFXcU0/pHJizVMUnXBiBh6IicdTTzsEOnuZkDE/2rcJI4KMf/TF+0TucwDhkZ+DGL4/nGkPGV/AIC+2RvfP6ZPTI4gu5XNM/Um7RPzuIFyn1zW7wpQ9UHj+fbOHPmDlGCOGBGIeQQfwuq0jnISBQfOHft7JEHN94Q5xF6XLFFVfkyKIEGyuiGAo3r6BIx0imcM6k+6GHHspOEQbcDq+UTl4BwRu7PstUiPEJFsa9/PLL83nXg6d2xnUvoxS5L7744uGyh/wyRpRF9YwSHsHjE088kWWADQeRFThZkTgBstensZG5h4m56oEdcAp9CwTOVUlj6hgECcGBpA6XDazeiLKhVABQAhKB3cNxbEAL4KoEppm+gjf3OMafDf+UW7zeTL/ltqIiAxBMOIIxnLOHgbFsMGQ4InhE0nJfrXw2hnIRD3SFBKmYWDfqE49woFvOzZno3NxM0HDciMjBDsjEBgLTsJHYN+qjmWtj7hjBLKFFQgL7qRz14jHHHJPBcC2M3wRPVDT5ohzZRv0Z16O/sdozAKmdopUH5kftTrzJpl+lk29CcgpLw3BgpMbwwqF/S80pGJ6xO0WM+8Ybbxw2TuOEoTYakwyovB/JKdzDMVQOHvCRzXju890fL11aGhcMqqIxdwwCRkYQDZAaE7lWBhyosQEmQM439MgffDHm0Si8EcuBC0ezcQSZVKYktzFEW+3sfQ4natRvu9eMTS9F7IvHo+m/2fb6LNuCc0WsW+mzHq9j6hgE9YCHp5tkez2EAVjlMOmyUlU2Lis8ygVR0rykyoltPZCaOY9fr32Qp50X6xi7pWCGbsHBvwLgGIcddljGxvcsjOU1GseyiKjJQWydpiqNsBlei85BfhNxeJunVCl31x0jBOMAjJ9jRC3OEERDS7QMI0qQohIYgLSq7FJuMZbi9WZA7kRbvFAWx5Dyy449mjEDG/dyDPW4VSiy2iNvBcCSUdxyyy35OYHrqJUx843j8I/qQpA074BVVdR1x+AIHCIiIGewsqIuds41tSSlOxeOFHuOQ/E+2zPEuFYVKM32U3RMvGy44YbZMTg2B2+GOIXXJcjpR9lkUy/QyZ7GUU8zAD9RCiuR0oQYVv1IMAk7qFL+rjkGg7GZQPLufffdN69QKJtkCAKKjNGu1p7gMgWDYEDRpkpAmu0rnMLehie/RavcI49Sr1ZW0w6V91ac/IsxmdHPB0U5pQ+4+TExDudNUhPufnaKIn7N6m2k9h11jKLRqP+UQJb2eHh4uYjK0LW1D0MpCq0NR4g24RTR/0hCdvM6/m14FtljeTL4D/liedFeO7LYcyh7eMGDY8X16IM8Vp9kWjj2GwWG5IZb2FKVOHTMMTCvDKBgD2Z22223bNynnnpqVrZXBFxjQDZUFJiwIqKHN8qHO+64IxvN/fffn9vG/VWC0UpfeC5uZMEbg/ctM/8SzYOxZ599Nhs4ebSx0ECpcDFvMCdRggkesoQ+zaHU0N4EgAEnue2227JTON+LgaEVDFu5h+w2Wdl33GFkEUIQqYIqdYwwbJGO8q2xOydqUiTFWpJVPzsuUwhlzzFETxlGdFSCqaMB4XwvUzgKWU3AyW4uwFns4QMbilUyxbq8p/4cw3UEB8FDGQUDx/acqB8zRS2dw5qthe3VatPKucocg6JiYu3lP2nfawvekKVITzgJQLH24QTBtPZeE2D89957b27jwZ1I
wIm8R2OMWHmJ+3pxTzaK8l+HyMrgTzrppMxqOIEsGoZvz0nsyWiliRMUl2G9aOk6POyLZVUvYtBpniL4wA1m9lVSW46BOQqKpTLK9FnUsxftvW4swssa4dkhCGFCMNfcp08lhM9KKc4h0obgsa8ShHb6Cv5DJnu8IwHB9TB852DkOlzIRV6kXbSVMfQj48BWdhE0TLr1Fe3zQR/+gRMK5yjuq4KjZccQ2SlYjexHmCnSkiLjtsesmlnpQ5naFo1A5GMAHoJxBI709ttv54ygntZWmWEcQMS9VQleRT9kNmfAG0P3HRPGbHnVudg4gEyJOAYiE0wikHAAcxHyxndO4KI/WHEK/Qzo7wjAXfaFNdurikaNtIERRTqmYIYdE2tGEs8hfJ8iFB/3xV67MCjG8NZbb6Unn3wyC+XfDxfnDxFp496qhK6qn5CDA5twK/fIRH5Gb0MMOhxCFgkKjOBoHqKEkmWvueaanG04iTHcP3CKQO0/e3ZhgceP2smqcKyKRuUYlEKhPDL+d5z1c4qVFTDnmBIZMwZ9DiKAzTmvCetPNFR7W7fXXt/KLddqTcyjr17bRybkEF5XiQhPHnMuDlF07MCB3I49l4EDxTrnfsFBJBxQbQSKeGoROqjdurWzIzoGJqRxS2KUf/rpp2flcRDRjRKVCdpFhCwz7rOVKE5z++235/7uuuuuXDq5P5yKEY0np8B3TKb9K1/vLTF0/7MiJtyRPYrq4fx+7R2e7vFDDzDyfx1goPwcUGMEYG/rFI3oGAYW0UUyimQIcRwGzbgpVsZAUTYE065xCtc5GUeSHTyg4kzKs/FKoSBljyhvTz6y2gseZAwlwgI+cNBGtpV9ZRj4BobjFY9O8g0bQcXWaRpxBE5hHuFnJ0XB6dOn56ge2QGDlK2dFSSG4b8kxVzEdSWGVxgYQLzrxJkIGgbTaUE73b9MZ/KNfIMOJpdcckndYZWmFAwv+wgydW/o8wsCK3xnz56dFzx8oxPGtk7QiI5h0FBaeGzRKYIpjDN2ig6lB9OiprmI60qNieIMIXvsQy7yotjH9eI+2hbPDY4bI8D+2JdnWTYY+iwDs78qaUTHEM0sI1pClAVMnqX9ImGQszB6DHoNOLzZNZlGRlEq9JNB9JOsRXvoxDGnsDTudwFUHTNmzMjDqEaU9xYvGgWiZnka0TEo16CeNyCM1SLtwmt5cNEoCOUa5xjQAIFWEGBP5rbKdTRr1qwcfGUMthXVTCt917pnRMdwE6ZiQm0JckADBMYCgWLwtXjTSeq/d5Y7ieag7wmDwMAxJowqB4JUicDAMapEc9DXhEFgcjxcM7vvR4on7bHS1q84WNkpUr/iEL+aOLRw4cIlQCmuIhUBmsjHlpQ9c7EmzjEsN1vd6DeCg8UVT+qRd7b6EQey8wMT+6El8RSu36xhIO8AgQYI9F94bADG4NIAgUDg/wHX+3lgThDIegAAAABJRU5ErkJggg==".encode('utf-8')), embed=True) """ Explanation: MNIST from scratch This notebook walks through an example of training a TensorFlow model to do digit classification using the MNIST data set. MNIST is a labeled set of images of handwritten digits. An example follows. End of explanation """ import os from six.moves.urllib.request import urlretrieve SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/' WORK_DIRECTORY = "/tmp/mnist-data" def maybe_download(filename): """A helper to download the data files if not present.""" if not os.path.exists(WORK_DIRECTORY): os.mkdir(WORK_DIRECTORY) filepath = os.path.join(WORK_DIRECTORY, filename) if not os.path.exists(filepath): filepath, _ = urlretrieve(SOURCE_URL + filename, filepath) statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') else: print('Already downloaded', filename) return filepath train_data_filename = maybe_download('train-images-idx3-ubyte.gz') train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz') test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz') test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz') """ Explanation: We're going to be building a model that recognizes these digits as 5, 0, and 4. Imports and input data We'll proceed in steps, beginning with importing and inspecting the MNIST data. This doesn't have anything to do with TensorFlow in particular -- we're just downloading the data archive. End of explanation """ import gzip, binascii, struct, numpy import matplotlib.pyplot as plt with gzip.open(test_data_filename) as f: # Print the header fields. for field in ['magic number', 'image count', 'rows', 'columns']: # struct.unpack reads the binary data provided by f.read. # The format string '>i' decodes a big-endian integer, which # is the encoding of the data. print(field, struct.unpack('>i', f.read(4))[0]) # Read the first 28x28 set of pixel values. # Each pixel is one byte, [0, 255], a uint8. 
buf = f.read(28 * 28) image = numpy.frombuffer(buf, dtype=numpy.uint8) # Print the first few values of image. print('First 10 pixels:', image[:10]) """ Explanation: Working with the images Now we have the files, but the format requires a bit of pre-processing before we can work with it. The data is gzipped, requiring us to decompress it. And, each of the images are grayscale-encoded with values from [0, 255]; we'll normalize these to [-0.5, 0.5]. Let's try to unpack the data using the documented format: [offset] [type] [value] [description] 0000 32 bit integer 0x00000803(2051) magic number 0004 32 bit integer 60000 number of images 0008 32 bit integer 28 number of rows 0012 32 bit integer 28 number of columns 0016 unsigned byte ?? pixel 0017 unsigned byte ?? pixel ........ xxxx unsigned byte ?? pixel Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black). We'll start by reading the first image from the test data as a sanity check. End of explanation """ %matplotlib inline # We'll show the image and its pixel value histogram side-by-side. _, (ax1, ax2) = plt.subplots(1, 2) # To interpret the values as a 28x28 image, we need to reshape # the numpy array, which is one dimensional. ax1.imshow(image.reshape(28, 28), cmap=plt.cm.Greys); ax2.hist(image, bins=20, range=[0,255]); """ Explanation: The first 10 pixels are all 0 values. Not very interesting, but also unsurprising. We'd expect most of the pixel values to be the background color, 0. We could print all 28 * 28 values, but what we really need to do to make sure we're reading our data properly is look at an image. End of explanation """ # Let's convert the uint8 image to 32 bit floats and rescale # the values to be centered around 0, between [-0.5, 0.5]. # # We again plot the image and histogram to check that we # haven't mangled the data. scaled = image.astype(numpy.float32) scaled = (scaled - (255 / 2.0)) / 255 _, (ax1, ax2) = plt.subplots(1, 2) ax1.imshow(scaled.reshape(28, 28), cmap=plt.cm.Greys); ax2.hist(scaled, bins=20, range=[-0.5, 0.5]); """ Explanation: The large number of 0 values correspond to the background of the image, another large mass of value 255 is black, and a mix of grayscale transition values in between. Both the image and histogram look sensible. But, it's good practice when training image models to normalize values to be centered around 0. We'll do that next. The normalization code is fairly short, and it may be tempting to assume we haven't made mistakes, but we'll double-check by looking at the rendered input and histogram again. Malformed inputs are a surprisingly common source of errors when developing new models. End of explanation """ with gzip.open(test_labels_filename) as f: # Print the header fields. for field in ['magic number', 'label count']: print(field, struct.unpack('>i', f.read(4))[0]) print('First label:', struct.unpack('B', f.read(1))[0]) """ Explanation: Great -- we've retained the correct image data while properly rescaling to the range [-0.5, 0.5]. Reading the labels Let's next unpack the test label data. The format here is similar: a magic number followed by a count followed by the labels as uint8 values. In more detail: [offset] [type] [value] [description] 0000 32 bit integer 0x00000801(2049) magic number (MSB first) 0004 32 bit integer 10000 number of items 0008 unsigned byte ?? label 0009 unsigned byte ?? label ........ xxxx unsigned byte ?? 
label As with the image data, let's read the first test set value to sanity check our input path. We'll expect a 7. End of explanation """ IMAGE_SIZE = 28 PIXEL_DEPTH = 255 def extract_data(filename, num_images): """Extract the images into a 4D tensor [image index, y, x, channels]. For MNIST data, the number of channels is always 1. Values are rescaled from [0, 255] down to [-0.5, 0.5]. """ print('Extracting', filename) with gzip.open(filename) as bytestream: # Skip the magic number and dimensions; we know these values. bytestream.read(16) buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images) data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32) data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1) return data train_data = extract_data(train_data_filename, 60000) test_data = extract_data(test_data_filename, 10000) """ Explanation: Indeed, the first label of the test set is 7. Forming the training, testing, and validation data sets Now that we understand how to read a single element, we can read a much larger set that we'll use for training, testing, and validation. Image data The code below is a generalization of our prototyping above that reads the entire test and training data set. End of explanation """ print('Training data shape', train_data.shape) _, (ax1, ax2) = plt.subplots(1, 2) ax1.imshow(train_data[0].reshape(28, 28), cmap=plt.cm.Greys); ax2.imshow(train_data[1].reshape(28, 28), cmap=plt.cm.Greys); """ Explanation: A crucial difference here is how we reshape the array of pixel values. Instead of one image that's 28x28, we now have a set of 60,000 images, each one being 28x28. We also include a number of channels, which for grayscale images as we have here is 1. Let's make sure we've got the reshaping parameters right by inspecting the dimensions and the first two images. (Again, mangled input is a very common source of errors.) End of explanation """ NUM_LABELS = 10 def extract_labels(filename, num_images): """Extract the labels into a 1-hot matrix [image index, label index].""" print('Extracting', filename) with gzip.open(filename) as bytestream: # Skip the magic number and count; we know these values. bytestream.read(8) buf = bytestream.read(1 * num_images) labels = numpy.frombuffer(buf, dtype=numpy.uint8) # Convert to dense 1-hot representation. return (numpy.arange(NUM_LABELS) == labels[:, None]).astype(numpy.float32) train_labels = extract_labels(train_labels_filename, 60000) test_labels = extract_labels(test_labels_filename, 10000) """ Explanation: Looks good. Now we know how to index our full set of training and test images. Label data Let's move on to loading the full set of labels. As is typical in classification problems, we'll convert our input labels into a 1-hot encoding over a length 10 vector corresponding to 10 digits. The vector [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], for example, would correspond to the digit 1. End of explanation """ print('Training labels shape', train_labels.shape) print('First label vector', train_labels[0]) print('Second label vector', train_labels[1]) """ Explanation: As with our image data, we'll double-check that our 1-hot encoding of the first few values matches our expectations. 
End of explanation """ VALIDATION_SIZE = 5000 validation_data = train_data[:VALIDATION_SIZE, :, :, :] validation_labels = train_labels[:VALIDATION_SIZE] train_data = train_data[VALIDATION_SIZE:, :, :, :] train_labels = train_labels[VALIDATION_SIZE:] train_size = train_labels.shape[0] print('Validation shape', validation_data.shape) print('Train size', train_size) """ Explanation: The 1-hot encoding looks reasonable. Segmenting data into training, test, and validation The final step in preparing our data is to split it into three sets: training, test, and validation. This isn't the format of the original data set, so we'll take a small slice of the training data and treat that as our validation set. End of explanation """ import tensorflow as tf # We'll bundle groups of examples during training for efficiency. # This defines the size of the batch. BATCH_SIZE = 60 # We have only one channel in our grayscale images. NUM_CHANNELS = 1 # The random seed that defines initialization. SEED = 42 # This is where training samples and labels are fed to the graph. # These placeholder nodes will be fed a batch of training data at each # training step, which we'll write once we define the graph structure. train_data_node = tf.placeholder( tf.float32, shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)) train_labels_node = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_LABELS)) # For the validation and test data, we'll just hold the entire dataset in # one constant node. validation_data_node = tf.constant(validation_data) test_data_node = tf.constant(test_data) # The variables below hold all the trainable weights. For each, the # parameter defines how the variables will be initialized. conv1_weights = tf.Variable( tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32. stddev=0.1, seed=SEED)) conv1_biases = tf.Variable(tf.zeros([32])) conv2_weights = tf.Variable( tf.truncated_normal([5, 5, 32, 64], stddev=0.1, seed=SEED)) conv2_biases = tf.Variable(tf.constant(0.1, shape=[64])) fc1_weights = tf.Variable( # fully connected, depth 512. tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512], stddev=0.1, seed=SEED)) fc1_biases = tf.Variable(tf.constant(0.1, shape=[512])) fc2_weights = tf.Variable( tf.truncated_normal([512, NUM_LABELS], stddev=0.1, seed=SEED)) fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS])) print('Done') """ Explanation: Defining the model Now that we've prepared our data, we're ready to define our model. The comments describe the architecture, which is fairly typical of models that process image data. The raw input passes through several convolution and max pooling layers with rectified linear activations before several fully connected layers and a softmax loss for predicting the output class. During training, we use dropout. We'll separate our model definition into three steps: Defining the variables that will hold the trainable weights. Defining the basic model graph structure described above. And, Stamping out several copies of the model graph for training, testing, and validation. We'll start with the variables. End of explanation """ def model(data, train=False): """The Model definition.""" # 2D convolution, with 'SAME' padding (i.e. the output feature map has # the same size as the input). Note that {strides} is a 4D array whose # shape matches the data layout: [image index, y, x, depth]. conv = tf.nn.conv2d(data, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') # Bias and rectified linear non-linearity. 
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases)) # Max pooling. The kernel size spec ksize also follows the layout of # the data. Here we have a pooling window of 2, and a stride of 2. pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv = tf.nn.conv2d(pool, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases)) pool = tf.nn.max_pool(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Reshape the feature map cuboid into a 2D matrix to feed it to the # fully connected layers. pool_shape = pool.get_shape().as_list() reshape = tf.reshape( pool, [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]]) # Fully connected layer. Note that the '+' operation automatically # broadcasts the biases. hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases) # Add a 50% dropout during training only. Dropout also scales # activations such that no rescaling is needed at evaluation time. if train: hidden = tf.nn.dropout(hidden, 0.5, seed=SEED) return tf.matmul(hidden, fc2_weights) + fc2_biases print('Done') """ Explanation: Now that we've defined the variables to be trained, we're ready to wire them together into a TensorFlow graph. We'll define a helper to do this, model, which will return copies of the graph suitable for training and testing. Note the train argument, which controls whether or not dropout is used in the hidden layer. (We want to use dropout only during training.) End of explanation """ # Training computation: logits + cross-entropy loss. logits = model(train_data_node, True) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( labels=train_labels_node, logits=logits)) # L2 regularization for the fully connected parameters. regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) + tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases)) # Add the regularization term to the loss. loss += 5e-4 * regularizers # Optimizer: set up a variable that's incremented once per batch and # controls the learning rate decay. batch = tf.Variable(0) # Decay once per epoch, using an exponential schedule starting at 0.01. learning_rate = tf.train.exponential_decay( 0.01, # Base learning rate. batch * BATCH_SIZE, # Current index into the dataset. train_size, # Decay step. 0.95, # Decay rate. staircase=True) # Use simple momentum for the optimization. optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(loss, global_step=batch) # Predictions for the minibatch, validation set and test set. train_prediction = tf.nn.softmax(logits) # We'll compute them only once in a while by calling their {eval()} method. validation_prediction = tf.nn.softmax(model(validation_data_node)) test_prediction = tf.nn.softmax(model(test_data_node)) print('Done') """ Explanation: Having defined the basic structure of the graph, we're ready to stamp out multiple copies for training, testing, and validation. Here, we'll do some customizations depending on which graph we're constructing. train_prediction holds the training graph, for which we use cross-entropy loss and weight regularization. We'll adjust the learning rate during training -- that's handled by the exponential_decay operation, which is itself an argument to the MomentumOptimizer that performs the actual training. The validation and prediction graphs are much simpler to generate -- we need only create copies of the model with the validation and test inputs and a softmax classifier as the output. 
End of explanation """ # Create a new interactive session that we'll use in # subsequent code cells. s = tf.InteractiveSession() # Use our newly created session as the default for # subsequent operations. s.as_default() # Initialize all the variables we defined above. tf.global_variables_initializer().run() """ Explanation: Training and visualizing results Now that we have the training, test, and validation graphs, we're ready to actually go through the training loop and periodically evaluate loss and error. All of these operations take place in the context of a session. In Python, we'd write something like: with tf.Session() as s: ...training / test / evaluation loop... But, here, we'll want to keep the session open so we can poke at values as we work out the details of training. The TensorFlow API includes a function for this, InteractiveSession. We'll start by creating a session and initializing the variables we defined above. End of explanation """ BATCH_SIZE = 60 # Grab the first BATCH_SIZE examples and labels. batch_data = train_data[:BATCH_SIZE, :, :, :] batch_labels = train_labels[:BATCH_SIZE] # This dictionary maps the batch data (as a numpy array) to the # node in the graph it should be fed to. feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels} # Run the graph and fetch some of the nodes. _, l, lr, predictions = s.run( [optimizer, loss, learning_rate, train_prediction], feed_dict=feed_dict) print('Done') """ Explanation: Now we're ready to perform operations on the graph. Let's start with one round of training. We're going to organize our training steps into batches for efficiency; i.e., training using a small set of examples at each step rather than a single example. End of explanation """ print(predictions[0]) """ Explanation: Let's take a look at the predictions. How did we do? Recall that the output will be probabilities over the possible classes, so let's look at those probabilities. End of explanation """ # The highest probability in the first entry. print('First prediction', numpy.argmax(predictions[0])) # But, predictions is actually a list of BATCH_SIZE probability vectors. print(predictions.shape) # So, we'll take the highest probability for each vector. print('All predictions', numpy.argmax(predictions, 1)) """ Explanation: As expected without training, the predictions are all noise. Let's write a scoring function that picks the class with the maximum probability and compares with the example's label. We'll start by converting the probability vectors returned by the softmax into predictions we can match against the labels. End of explanation """ print('Batch labels', numpy.argmax(batch_labels, 1)) """ Explanation: Next, we can do the same thing for our labels -- using argmax to convert our 1-hot encoding into a digit class. End of explanation """ correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(batch_labels, 1)) total = predictions.shape[0] print(float(correct) / float(total)) confusions = numpy.zeros([10, 10], numpy.float32) bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(batch_labels, 1)) for predicted, actual in bundled: confusions[predicted, actual] += 1 plt.grid(False) plt.xticks(numpy.arange(NUM_LABELS)) plt.yticks(numpy.arange(NUM_LABELS)) plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest'); """ Explanation: Now we can compare the predicted and label classes to compute the error rate and confusion matrix for this batch. 
End of explanation """ def error_rate(predictions, labels): """Return the error rate and confusions.""" correct = numpy.sum(numpy.argmax(predictions, 1) == numpy.argmax(labels, 1)) total = predictions.shape[0] error = 100.0 - (100 * float(correct) / float(total)) confusions = numpy.zeros([10, 10], numpy.float32) bundled = zip(numpy.argmax(predictions, 1), numpy.argmax(labels, 1)) for predicted, actual in bundled: confusions[predicted, actual] += 1 return error, confusions print('Done') """ Explanation: Now let's wrap this up into our scoring function. End of explanation """ # Train over the first 1/4th of our training set. steps = train_size // BATCH_SIZE for step in range(steps): # Compute the offset of the current minibatch in the data. # Note that we could use better randomization across epochs. offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE) batch_data = train_data[offset:(offset + BATCH_SIZE), :, :, :] batch_labels = train_labels[offset:(offset + BATCH_SIZE)] # This dictionary maps the batch data (as a numpy array) to the # node in the graph it should be fed to. feed_dict = {train_data_node: batch_data, train_labels_node: batch_labels} # Run the graph and fetch some of the nodes. _, l, lr, predictions = s.run( [optimizer, loss, learning_rate, train_prediction], feed_dict=feed_dict) # Print out the loss periodically. if step % 100 == 0: error, _ = error_rate(predictions, batch_labels) print('Step %d of %d' % (step, steps)) print('Mini-batch loss: %.5f Error: %.5f Learning rate: %.5f' % (l, error, lr)) print('Validation error: %.1f%%' % error_rate( validation_prediction.eval(), validation_labels)[0]) """ Explanation: We'll need to train for some time to actually see useful predicted values. Let's define a loop that will go through our data. We'll print the loss and error periodically. Here, we want to iterate over the entire data set rather than just the first batch, so we'll need to slice the data to that end. (One pass through our training set will take some time on a CPU, so be patient if you are executing this notebook.) End of explanation """ test_error, confusions = error_rate(test_prediction.eval(), test_labels) print('Test error: %.1f%%' % test_error) plt.xlabel('Actual') plt.ylabel('Predicted') plt.grid(False) plt.xticks(numpy.arange(NUM_LABELS)) plt.yticks(numpy.arange(NUM_LABELS)) plt.imshow(confusions, cmap=plt.cm.jet, interpolation='nearest'); for i, cas in enumerate(confusions): for j, count in enumerate(cas): if count > 0: xoff = .07 * len(str(count)) plt.text(j-xoff, i+.2, int(count), fontsize=9, color='white') """ Explanation: The error seems to have gone down. Let's evaluate the results using the test set. To help identify rare mispredictions, we'll include the raw count of each (prediction, label) pair in the confusion matrix. End of explanation """ plt.xticks(numpy.arange(NUM_LABELS)) plt.hist(numpy.argmax(test_labels, 1)); """ Explanation: We can see here that we're mostly accurate, with some errors you might expect, e.g., '9' is often confused as '4'. Let's do another sanity check to make sure this matches roughly the distribution of our test set, e.g., it seems like we have fewer '5' values. End of explanation """
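One piece of arithmetic worth spelling out from the model above: the fully connected layer's input size, IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, follows from the two 2x2 max-pooling layers with stride 2 and 'SAME' padding, each of which halves the 28x28 spatial dimensions. A standalone sanity check (not part of the notebook):

IMAGE_SIZE = 28
DEPTH_AFTER_CONV2 = 64  # output depth of the second convolution

spatial = IMAGE_SIZE
for _ in range(2):   # two max-pool layers, each with stride 2
    spatial //= 2    # 28 -> 14 -> 7

flattened = spatial * spatial * DEPTH_AFTER_CONV2
print(spatial, flattened)  # 7 3136, matching IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64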
uber/pyro
tutorial/source/predictive_deterministic.ipynb
apache-2.0
import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.datasets import make_regression import pyro.distributions as dist from pyro.infer import MCMC, NUTS, Predictive from pyro.infer.mcmc.util import summary from pyro.distributions import constraints import pyro import torch pyro.set_rng_seed(101) %matplotlib inline %config InlineBackend.figure_format='retina' """ Explanation: Example: Utilizing Predictive and Deterministic with MCMC and SVI In this short tutorial we'll see how to use deterministic statements inside a model and inspect its samples with Predictive class. Additionally a GammaPoisson distribution will be discussed as it'll be used within our model. Check out other tutorials that use Predictive and Deterministic: Example: analyzing baseball stats with MCMC Bayesian Regression - Inference Algorithms (Part 2) End of explanation """ X, y = make_regression(n_features=1, bias=150., noise=5., random_state=108) X_ = torch.tensor(X, dtype=torch.float) y_ = torch.tensor((y**3)/100000. + 10., dtype=torch.float) y_.round_().clamp_(min=0); plt.scatter(X_, y_) plt.ylabel('y') plt.xlabel('x'); """ Explanation: Data generation Let's generate our data with sklearn.datasets.make_regression method where we can determine the number of features, bias and noise power. Also we'll transform the target variable and make it a torch tensor. End of explanation """ def model(features, counts): N, P = features.shape scale = pyro.sample("scale", dist.LogNormal(0, 1)) coef = pyro.sample("coef", dist.Normal(0, scale).expand([P]).to_event(1)) rate = pyro.deterministic("rate", torch.nn.functional.softplus(coef @ features.T)) concentration = pyro.sample("concentration", dist.LogNormal(0, 1)) with pyro.plate("bins", N): return pyro.sample("counts", dist.GammaPoisson(concentration, rate), obs=counts) """ Explanation: Model definition In our model we first sample coefficient from a normal distribution with zero mean and sampled standard deviation. We use to_event(1) to move the expanded dimension from batch_shape to event_shape as we want to sample from a multivariate normal distribution. deterministic part is used to register a name whose value is fully determined by arguments passed to it. Here we use softplus to be sure that the resulting rate isn't negative. Then we use vectorized version of plate to record counts from passed dataset as they were sampled from GammaPoisson distribution. For now this model might be a little obscure but later we will dive into sampled data to better grasp it's internals. End of explanation """ nuts_kernel = NUTS(model) mcmc = MCMC(nuts_kernel, num_samples=500) %%time mcmc.run(X_, y_); samples = mcmc.get_samples() for k, v in samples.items(): print(f"{k}: {tuple(v.shape)}") predictive = Predictive(model, samples)(X_, None) for k, v in predictive.items(): print(f"{k}: {tuple(v.shape)}") """ Explanation: Inference Inference will be done with MCMC algorithm. IMPORTANT! Please note that only scale and coef variables are returned in samples dict. deterministic parts are available via Predictive, similarly as observed samples. 
End of explanation """ def prepare_counts_df(predictive): counts = predictive['counts'].numpy() counts_mean = counts.mean(axis=0) counts_std = counts.std(axis=0) counts_df = pd.DataFrame({ "feat": X_.squeeze(), "mean": counts_mean, "high": counts_mean + counts_std, "low": counts_mean - counts_std, }) return counts_df.sort_values(by=['feat']) counts_df = prepare_counts_df(predictive) plt.scatter(X_, y_, c='r') plt.ylabel('y') plt.xlabel('x') plt.plot(counts_df['feat'], counts_df['mean']) plt.fill_between(counts_df['feat'], counts_df['high'], counts_df['low'], alpha=0.5); """ Explanation: After sampling let's see how well our model fits the data. We compute sampled counts mean and standard deviation and plot it against the original data. End of explanation """ print('Concentration mean: ', samples['concentration'].mean().item()) print('Concentration std: ', samples['concentration'].std().item()) print('Coef mean: ', samples['coef'].mean().item()) print('Coef std: ', samples['coef'].std().item()) """ Explanation: But where do these values (and uncertainty) come from? Let's find out! Inspecting deterministic part Now let's move to the essence of this tutorial. GammaPoisson distribution used here and parameterized with (concentration, rate) arguments is basically an alternative parametrization of NegativeBinomial distribution. NegativeBinomial answers a question: How many successes will we record before seeing r failures (overall) if each trial wins with probability p? The reparametrization occurs as follows: concentration = r rate = 1 / (p + 1) First we check sampled mean of concentration and coef variables... End of explanation """ rates = predictive['rate'].squeeze() rates_reparam = 1. / (rates + 1.) # here's reparametrization """ Explanation: ...and do reparametrization (again please note that we get it from predictive!). End of explanation """ fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(13, 4) ax1.scatter(X_, rates_reparam.mean(axis=0)) ax1.set_ylabel('mean') ax1.set_xlabel('x') ax1.set_title('rate means') ax2.scatter(X_, rates_reparam.std(axis=0)) ax2.set_ylabel('std') ax2.set_xlabel('x') ax2.set_title('rate stds'); """ Explanation: Now we plot reparametrized rate: End of explanation """ fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(13, 4) ax1.scatter(X_, y_, c='r') ax1.plot(counts_df['feat'], counts_df['mean']) ax1.fill_between(counts_df['feat'], counts_df['high'], counts_df['low'], alpha=0.5) ax1.axhline(samples['concentration'].mean().item(), c='g', linestyle='dashed') ax1.axvline(-0.46, c='g', linestyle='dashed') ax1.set_ylabel('y') ax1.set_xlabel('x') ax1.set_title('fitted model') ax2.scatter(X_, rates_reparam.mean(axis=0)) ax2.axhline(0.5, c='g', linestyle='dashed') ax2.axvline(-0.46, c='g', linestyle='dashed') ax2.set_ylabel('mean') ax2.set_xlabel('x') ax2.set_title('rate means'); """ Explanation: We see that the probability of success rises with x. This means that it will take more and more trials before we observe those 28 failures imposed by concentration parameter. Intuitively if we want to record 28 failures where each failure occurs with probability 0.5 then it should also take 28 successes. Let's check if our model follows this logic: End of explanation """ from pyro.infer import SVI, Trace_ELBO from pyro.optim import Adam from pyro.infer.autoguide import AutoNormal """ Explanation: It indeed does. Red lines show that 28 successes and rate 0.5 are located with the same x argument. SVI approach Predictive class can also be used with the SVI method. 
In the next section we will use it with AutoGuide's guide and manually designed one. End of explanation """ def guide(features, counts): N, P = features.shape scale_param = pyro.param("scale_param", torch.tensor(0.1), constraint=constraints.positive) loc_param = pyro.param("loc_param", torch.tensor(0.0)) scale = pyro.sample("scale", dist.Delta(scale_param)) coef = pyro.sample("coef", dist.Normal(loc_param, scale).expand([P]).to_event(1)) concentration_param = pyro.param("concentration_param", torch.tensor(0.1), constraint=constraints.positive) concentration = pyro.sample("concentration", dist.Delta(concentration_param)) pyro.clear_param_store() adam_params = {"lr": 0.005, "betas": (0.90, 0.999)} optimizer = Adam(adam_params) svi = SVI(model, guide, optimizer, loss=Trace_ELBO()) %%time n_steps = 5001 for step in range(n_steps): loss = svi.step(X_, y_) if step % 1000 == 0: print('Loss: ', loss) """ Explanation: Manually defined guide First we define our guide with all sample sites that are present in the model and parametrize them with learnable parameters. Then we perform gradient descent with Adam optimizer. End of explanation """ list(pyro.get_param_store().items()) predictive_svi = Predictive(model, guide=guide, num_samples=500)(X_, None) for k, v in predictive_svi.items(): print(f"{k}: {tuple(v.shape)}") counts_df = prepare_counts_df(predictive_svi) plt.scatter(X_, y_, c='r') plt.ylabel('y') plt.xlabel('x') plt.plot(counts_df['feat'], counts_df['mean']) plt.fill_between(counts_df['feat'], counts_df['high'], counts_df['low'], alpha=0.5); """ Explanation: Pyros parameter store is comprised of learned parameters that will be used in Predictive stage. Instead of providing samples we pass guide parameter to construct predictive distribution. End of explanation """ pyro.clear_param_store() adam_params = {"lr": 0.005, "betas": (0.90, 0.999)} optimizer = Adam(adam_params) auto_guide = AutoNormal(model) svi = SVI(model, auto_guide, optimizer, loss=Trace_ELBO()) %%time n_steps = 3001 for step in range(n_steps): loss = svi.step(X_, y_) if step % 1000 == 0: print('Loss: ', loss) auto_guide(X_, y_) """ Explanation: AutoGuide Another approach for conducting SVI is to rely on automatic guide generation. Here we use AutoNormal that underneath uses a normal distribution with a diagonal covariance matrix. End of explanation """ list(pyro.get_param_store().items()) """ Explanation: As we check PARAM_STORE we see that each sample site is approximated with a normal distribution. End of explanation """ predictive_svi = Predictive(model, guide=auto_guide, num_samples=500)(X_, None) for k, v in predictive_svi.items(): print(f"{k}: {tuple(v.shape)}") counts_df = prepare_counts_df(predictive_svi) plt.scatter(X_, y_, c='r') plt.ylabel('y') plt.xlabel('x') plt.plot(counts_df['feat'], counts_df['mean']) plt.fill_between(counts_df['feat'], counts_df['high'], counts_df['low'], alpha=0.5); """ Explanation: Finally we again construct a predictive distribution and plot counts. For all three methods we managed to get similar results for our parameters. End of explanation """
tomspur/blog
posts/0001-publication-ready-figures-with-matplotlib-and-ipython-notebook/matplotlib_plots.ipynb
mit
%matplotlib inline import seaborn as snb import numpy as np import matplotlib.pyplot as plt """ Explanation: Publication ready figures with matplotlib and Jupyter notebook A very convenient workflow to analyze data and create figures that can be used in various ways for publication is to use the IPython Notebook or Jupyer notebook in combination with matplotlib. I faced the problem that one often needs different file formats for different kind of publications, such as on a webpage or in a paper. For instance, to put the figure on a webpage, most softwares support only png or jpg formats, so that a fixed resolution must be provided. On the other hand, a scalable figure format can be scaled as needed and when putting it into a pdf document, there won't be artifacts when zooming into the figure. In this blog post, I'll provide a small function that saves the matplotlib figure to various file formats, which can then be used where needed. Creating a simple plot A simple plot can be created within an ipython notebook with: Loading matplotlib and setting up ipython notebook to display the graphics inline: End of explanation """ def create_plot(): x = np.arange(0.0, 10.0, 0.1) plt.plot(x, x**2) plt.xlabel("$x$") plt.ylabel("$y=x^2$") create_plot() plt.show() """ Explanation: Creating a quatratic plot: End of explanation """ def save_to_file(filename, fig=None): """Save to @filename with a custom set of file formats. By default, this function takes to most recent figure, but a @fig can also be passed to this function as an argument. """ formats = [ "pdf", "eps", "png", "pgf", ] if fig is None: for form in formats: plt.savefig("%s.%s"%(filename, form)) else: for form in formats: fig.savefig("%s.%s"%(filename, form)) """ Explanation: Save the figure The previous figure can be saved with calling matplotlib.pyplot.savefig and matplotlib will save the figure in the output format based on the extension of the filename. To save to various formats, one would need to call this function several times or instead define a new function that can be included as boilerplate in the first cell of a notebook such as: End of explanation """ create_plot() save_to_file("simple_plot") """ Explanation: And it can be easily saved with: End of explanation """
tuanavu/coursera-university-of-washington
machine_learning/2_regression/assignment/week3/week-3-polynomial-regression-assignment-exercise.ipynb
mit
import sys sys.path.append('C:\Anaconda2\envs\dato-env\Lib\site-packages') import graphlab """ Explanation: Regression Week 3: Assessing Fit (polynomial regression) In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will: * Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed * Use matplotlib to visualize polynomial regressions * Use matplotlib to visualize the same polynomial degree on different subsets of the data * Use a validation set to select a polynomial degree * Assess the final fit using test data We will continue to use the House data from previous notebooks. Fire up graphlab create End of explanation """ tmp = graphlab.SArray([1., 2., 3.]) tmp_cubed = tmp.apply(lambda x: x**3) print tmp print tmp_cubed """ Explanation: Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree. The easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. For example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab) End of explanation """ ex_sframe = graphlab.SFrame() ex_sframe['power_1'] = tmp print ex_sframe """ Explanation: We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself). End of explanation """ def polynomial_sframe(feature, degree): # assume that degree >= 1 # initialize the SFrame: poly_sframe = graphlab.SFrame() # and set poly_sframe['power_1'] equal to the passed feature poly_sframe['power_1'] = feature # first check if degree > 1 if degree > 1: # then loop over the remaining degrees: # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree for power in range(2, degree+1): # first we'll give the column a name: name = 'power_' + str(power) # then assign poly_sframe[name] to the appropriate power of feature poly_sframe[name] = feature ** power return poly_sframe """ Explanation: Polynomial_sframe function Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree: End of explanation """ print polynomial_sframe(tmp, 3) """ Explanation: To test your function consider the smaller tmp variable and what you would expect the outcome of the following call: End of explanation """ sales = graphlab.SFrame('kc_house_data.gl/') """ Explanation: Visualizing polynomial regression Let's use matplotlib to visualize what a polynomial regression looks like on some real data. End of explanation """ sales = sales.sort(['sqft_living', 'price']) """ Explanation: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices. 
End of explanation """ poly1_data = polynomial_sframe(sales['sqft_living'], 1) poly1_data['price'] = sales['price'] # add price to the data since it's the target print poly1_data """ Explanation: Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like. End of explanation """ model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None) #let's take a look at the weights before we plot model1.get("coefficients") import matplotlib.pyplot as plt %matplotlib inline plt.plot(poly1_data['power_1'],poly1_data['price'],'.', poly1_data['power_1'], model1.predict(poly1_data),'-') """ Explanation: NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users. End of explanation """ poly2_data = polynomial_sframe(sales['sqft_living'], 2) my_features = poly2_data.column_names() # get the name of the features poly2_data['price'] = sales['price'] # add price to the data since it's the target model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None) model2.get("coefficients") plt.plot(poly2_data['power_1'],poly2_data['price'],'.', poly2_data['power_1'], model2.predict(poly2_data),'-') """ Explanation: Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. We can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial? End of explanation """ poly3_data = polynomial_sframe(sales['sqft_living'], 3) poly3_features = poly3_data.column_names() # get the name of the features poly3_data['price'] = sales['price'] # add price to the data since it's the target model3 = graphlab.linear_regression.create(poly3_data, target = 'price', features = poly3_features, validation_set = None) model3.get("coefficients") plt.plot(poly3_data['power_1'],poly3_data['price'],'.', poly3_data['power_1'], model3.predict(poly3_data),'-') """ Explanation: The resulting model looks like half a parabola. Try on your own to see what the cubic looks like: End of explanation """ poly15_data = polynomial_sframe(sales['sqft_living'], 15) poly15_features = poly15_data.column_names() # get the name of the features poly15_data['price'] = sales['price'] # add price to the data since it's the target model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = poly15_features, validation_set = None) model15.get("coefficients") plt.plot(poly15_data['power_1'],poly15_data['price'],'.', poly15_data['power_1'], model15.predict(poly15_data),'-') """ Explanation: Now try a 15th degree polynomial: End of explanation """ bigset_1, bigset_2 = sales.random_split(0.5, seed=0) set_1, set_2 = bigset_1.random_split(0.5, seed=0) set_3, set_4 = bigset_2.random_split(0.5, seed=0) """ Explanation: What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look. Changing the data and re-learning We're going to split the sales data into four subsets of roughly equal size. 
Then you will estimate a 15th degree polynomial model on all four subsets of the data. Print the coefficients (you should use .print_rows(num_rows = 16) to view all of them) and plot the resulting fit (as we did above). The quiz will ask you some questions about these results. To split the sales data into four subsets, we perform the following steps: * First split sales into 2 subsets with .random_split(0.5, seed=0). * Next split the resulting subsets into 2 more subsets each. Use .random_split(0.5, seed=0). We set seed=0 in these steps so that different users get consistent results. You should end up with 4 subsets (set_1, set_2, set_3, set_4) of approximately equal size. End of explanation """ def get_poly_model(set_data): poly15_data = polynomial_sframe(set_data['sqft_living'], 15) poly15_features = poly15_data.column_names() # get the name of the features poly15_data['price'] = set_data['price'] # add price to the data since it's the target model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = poly15_features, validation_set = None) return poly15_data, model15 def get_coef(set_data): poly15_data, model15 = get_poly_model(set_data) return model15.get("coefficients") def plot_fitted_line(set_data): poly15_data, model15 = get_poly_model(set_data) return plt.plot(poly15_data['power_1'],poly15_data['price'],'.', poly15_data['power_1'], model15.predict(poly15_data),'-') set_1_coef = get_coef(set_1) print set_1_coef[set_1_coef['name'] == 'power_15'] plot_fitted_line(set_1) set_2_coef = get_coef(set_2) print set_2_coef[set_2_coef['name'] == 'power_15'] plot_fitted_line(set_2) set_3_coef = get_coef(set_3) print set_3_coef[set_3_coef['name'] == 'power_15'] plot_fitted_line(set_3) set_4_coef = get_coef(set_4) print set_4_coef[set_4_coef['name'] == 'power_15'] plot_fitted_line(set_4) """ Explanation: Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model. End of explanation """ training_and_validation_data, test_data = sales.random_split(0.9, seed=1) train_data, validation_data = training_and_validation_data.random_split(0.5, seed=1) """ Explanation: Some questions you will be asked on your quiz: Quiz Question: Is the sign (positive or negative) for power_15 the same in all four models? Quiz Question: (True/False) the plotted fitted lines look the same in all four plots Selecting a Polynomial Degree Whenever we have a "magic" parameter like the degree of the polynomial there is one well-known way to select these parameters: validation set. (We will explore another approach in week 4). We split the sales dataset 3-way into training set, test set, and validation set as follows: Split our sales data into 2 sets: training_and_validation and testing. Use random_split(0.9, seed=1). Further split our training data into two sets: training and validation. Use random_split(0.5, seed=1). Again, we set seed=1 to obtain consistent results for different users. 
End of explanation """ arr = [] for degree in range(1, 16): poly_data = polynomial_sframe(train_data['sqft_living'], degree) my_features = poly_data.column_names() poly_data['price'] = train_data['price'] model = graphlab.linear_regression.create(poly_data, target = 'price', features = my_features, validation_set = None, verbose = False) validation_data_poly = polynomial_sframe(validation_data['sqft_living'], degree) predictions = model.predict(validation_data_poly) residuals = validation_data['price'] - predictions rss = sum(residuals * residuals) arr.append(rss) # print degree # print rss # for degree, rss in enumerate(arr): # print degree, rss # Note that list index starts from 0, so degree = index + 1 print arr.index(min(arr)), min(arr) """ Explanation: Next you should write a loop that does the following: * For degree in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] (to get this in python type range(1, 15+1)) * Build an SFrame of polynomial data of train_data['sqft_living'] at the current degree * hint: my_features = poly_data.column_names() gives you a list e.g. ['power_1', 'power_2', 'power_3'] which you might find useful for graphlab.linear_regression.create( features = my_features) * Add train_data['price'] to the polynomial SFrame * Learn a polynomial regression model to sqft vs price with that degree on TRAIN data * Compute the RSS on VALIDATION data (here you will want to use .predict()) for that degree and you will need to make a polynmial SFrame using validation data. * Report which degree had the lowest RSS on validation data (remember python indexes from 0) (Note you can turn off the print out of linear_regression.create() with verbose = False) End of explanation """ arr2 = [] for degree in range(1, 16): poly_data = polynomial_sframe(train_data['sqft_living'], degree) my_features = poly_data.column_names() poly_data['price'] = train_data['price'] model = graphlab.linear_regression.create(poly_data, target = 'price', features = my_features, validation_set = None, verbose = False) test_data_poly = polynomial_sframe(test_data['sqft_living'], degree) predictions = model.predict(test_data_poly) residuals = test_data['price'] - predictions rss_test = sum(residuals * residuals) arr2.append(rss_test) # print degree # print rss for degree, rss in enumerate(arr2): print degree, rss print arr2.index(min(arr2)), min(arr2) """ Explanation: Quiz Question: Which degree (1, 2, …, 15) had the lowest RSS on Validation data? Now that you have chosen the degree of your polynomial using validation data, compute the RSS of this model on TEST data. Report the RSS on your quiz. End of explanation """ print arr2[6] """ Explanation: Quiz Question: what is the RSS on TEST data for the model with the degree selected from Validation data? (Make sure you got the correct degree from the previous question) End of explanation """
ellisztamas/faps
docs/tutorials/.ipynb_checkpoints/02_genotype_data-checkpoint.ipynb
mit
import numpy as np import faps as fp print("Created using FAPS version {}.".format(fp.__version__)) """ Explanation: Genotype data in FAPS End of explanation """ allele_freqs = np.random.uniform(0.3,0.5,10) mypop = fp.make_parents(5, allele_freqs, family_name='my_population') """ Explanation: Tom Ellis, March 2017 In most cases, researchers will have a sample of offspring, maternal and candidate paternal individuals typed at a set of markers. In this section we'll look in more detail at how FAPS deals with genotype data to build a matrix we can use for sibship inference. This notebook will examine how to: Generate simple genotypeArray objects and explore what information is contained in them. Import external genotype data. Work with genotype data from multiple half sib families. Checking genotype data is an important step before committing to a full analysis. A case study of data checking and cleaning using an empirical dataset is given in here. In the next section we'll see how to combine genotype information on offspring and a set of candidate parents to create an array of likelihoods of paternity for dyads of offspring and candidate fathers. Note that the first half of this tutorial only deals with the case where you have a genotypeArray object for a single maternal family. If you have multiple families, you can apply what is here to each one, but at some point you'll have to iterate over those families. See below and the specific tutorial on that. Currently, FAPS genotypeArray objects assume you are using biallelic, unlinked SNPs for a diploid. If your system deviates from these criteria in some way you can also skip this stage by creating your own array of paternity likelihoods using an appropriate likelihood function, and importing this directly as a paternityArrays. See the next section for more on paternityArray objects and how they should look. genotypeArray objects Basic genotype information Genotype data are stored in a class of objects called a genotypeArray. We'll illustrate how these work with simulated data, since not all information is available for real-world data sets. We first generate a vector of population allele frequencies for 10 unlinked SNP markers, and use these to create a population of five adult individuals. This is obviously an unrealisticaly small dataset, but serves for illustration. The optional argument family_names allows you to name this generation. End of explanation """ mypop.geno[0] """ Explanation: The object we just created contains information about the genotypes of each of the ten parent individuals. Genotypes are stored as NxLx2-dimensional arrays, where N is the number of individuals and L is the number of loci. We can view the genotype for the first parent like so (recall that Python starts counting from zero, not one): End of explanation """ mypop.geno[:2, :5] """ Explanation: You could subset the array by indexes the genotypes, for example by taking only the first two individuals and the first five loci: End of explanation """ print(mypop.subset([0,2]).names) print(mypop.drop([0,2]).names) """ Explanation: For realistic examples with many more loci, this obviously gets unwieldy pretty soon. It's cleaner to supply a list of individuals to keep or remove to the subset and drop functions. These return return a new genotypeArray for the individuals of interest. End of explanation """ print(mypop.names) # individual names print(mypop.size) # number of individuals print(mypop.nloci) # numbe of loci typed. 
""" Explanation: Information on individuals A genotypeArray contains other useful information about the individuals: End of explanation """ progeny = fp.make_sibships(mypop, 0, [1,2,3], 4, 'myprogeny') """ Explanation: make_sibships is a convenient way to generate a single half-sibling array from individuals in mypop. This code mates makes a half-sib array with individual 0 as the mothers, with individuals 1, 2 and 3 contributing male gametes. Each father has four offspring each. End of explanation """ print(progeny.fathers) print(progeny.mothers) print(progeny.families) print(progeny.nfamilies) """ Explanation: With this generation we can extract a little extra information from the genotypeArray than we could from the parents about their parents and family structure. End of explanation """ print(progeny.parent_index('mother', mypop.names)) print(progeny.parent_index('father', mypop.names)) """ Explanation: Of course with real data we would not normally know the identity of the father or the number of families, but this is useful for checking accuracy in simulations. It can also be useful to look up the positions of the parents in another list of names. This code finds the indices of the mothers and fathers of the offspring in the names listed in mypop. End of explanation """ mypop.markers """ Explanation: Information on markers Pull out marker names with marker. The names here are boring because they are simulated, but your data can have as exciting names as you'd like. End of explanation """ mypop.markers == progeny.markers """ Explanation: Check whether the locus names for parents and offspring match. This is obvious vital for determining who shares alleles with whom, but easy to overlook! If they don't match, the most likely explanation is that you have imported genotype data and misspecified where the genotype data start (the genotype_col argument). End of explanation """ mypop.allele_freqs() """ Explanation: FAPS uses population allele frequencies to calculate the likelihood that paternal alleles are drawn at random. They are are useful to check the markers are doing what you think they are. Pull out the population allele frequencies for each locus: End of explanation """ print(mypop.missing_data()) print(mypop.heterozygosity()) """ Explanation: We can also check for missing data and heterozygosity for each marker and individual. By default, data for each marker are returned: End of explanation """ print(mypop.missing_data(by='individual')) print(mypop.heterozygosity(by='individual')) """ Explanation: To get summaries for each individual: End of explanation """ offspring = fp.read_genotypes( path = '../../data/offspring_2012_genotypes.csv', mothers_col=1, genotype_col=2) """ Explanation: In this instance there is no missing data, because data are simulated to be error-free. See the next section on an empircal example where this is not true. Importing genotype data You can import genotype data from a text or CSV (comma-separated text) file. Both can be easily exported from a spreadsheet program. Rows index individuals, and columns index each typed locus. More specifically: Offspring names should be given in the first column If the data are offspring, names of the mothers are given in the second column. If known for some reason, names of fathers can be given as well. Genotype information should be given to the right of columns indicating individual or parental names, with locus names in the column headers. 
SNP genotype data must be biallelic, that is they can only be homozygous for the first allele, heterozygous, or homozygous for the second allele. These should be given as 0, 1 and 2 respectively. If genotype data is missing this should be entered as NA. The following code imports genotype information on real samples of offspring from half-sibling array of wild-pollinated snpadragon seedlings collected in the Spanish Pyrenees. The candidate parents are as many of the wild adult plants as we could find. You will find the data files on the IST Austria data repository (DOI:10.15479/AT:ISTA:95). Aside from the path to where the data file is stored, the two other arguments specify the column containing names of the mothers, and the first column containing genotype data of the offspring. End of explanation """ np.unique(offspring.mothers) """ Explanation: Again, Python starts counting from zero rather than one, so the first column is really column zero, and so on. Because these are CSV, there was no need to specify that data are delimited by commas, but this is included for illustration. Offspring are divided into 60 maternal families of different sizes. You can call the name of the mother of each offspring. You can also call the names of the fathers, with offspring.fathers, but since these are unknown this is not informative. End of explanation """ offspring.names """ Explanation: Offspring names are a combination of maternal family and a unique ID for ecah offspring. End of explanation """ print(offspring.missing_data('individual')[:10]) """ Explanation: You can call summaries of genotype data to help in data cleaning. For example, this code shows the proportion of loci with missing genotype data for the first ten offspring individuals. End of explanation """ print(offspring.missing_data('marker')[:10]) print(offspring.heterozygosity()[:10]) """ Explanation: This snippet shows the proportion of missing data points and heterozygosity for the first ten loci. These can be helpful in identifying dubious loci. End of explanation """ offs_split = offspring.split(by = offspring.mothers) offs_split.keys() """ Explanation: Multiple families In real data set we generally work with multplie half-sibling arrays at once. For downstream analyses we need to split up the genotype data into families to reflect this. This is easy to do with split and a vector of labels to group offspring by. This returns a dictionary of genotypeArray objects labelled by maternal family. These snippet splits up the data and prints the maternal family names. End of explanation """ offs_split["J1246"].names """ Explanation: Each entry is an individual genotypeArray. You can pull out individual families by indexing the dictionary by name. For example, here are the names of the offspring in family J1246: End of explanation """ {family : genArray.size for family,genArray in offs_split.items()} """ Explanation: To perform operations on each genotypeArray we now have to iterate over each element. A convenient way to do this is with dictionary comprehensions by separating out the labels from the genotypeArray objects using items. As an example, here's how you call the number of offspring in each family. It splits up the dictionary into keys for each family, and calls size on each genotypeArray (labelled genArray in the comprehension). End of explanation """ [genArray.size for genArray in offs_split.values()][:10] """ Explanation: You can achieve the same thing with a list comprehension, but you lose information about family ID. 
It is also more difficult to pass a list on to downstream functions. This snippet shows the first ten items. End of explanation """
mromanello/SunoikisisDC_NER
participants_notebooks/Sunoikisis - Named Entity Extraction 1a_PG.ipynb
gpl-3.0
2 + 3 """ Explanation: Plan of the lecture Introduction: Information Extraction and Named Entity Recognition (NER) NER: definitions and tasks (extraction, classification, disambiguation) basic programming concepts in Python Doing NER with existing libraries: NER from Latin texts with CLTK NER from journal articles with NLTK Python: basic concepts Python is a very flexible and very powerful programming language that can help you working with texts and corpora. Python's phylosophy emphasizes code readability and features a simple and very expressive syntax. It is actually easy to master the basic aspects of Python's syntax: it is amazing how much you can do even with just the most basic concepts... The aim of these two lectures is to introduce to you some of these basic operation, let you see some code in action and also give you some exercise where you can apply what you've seen. It is also amazing how many thing you can accomplish with some well written lines of Python! By the end of this class, we'd like to show you how you use Python to perform (some) Natural Language Processing. But of course, you can even just use Python do somethin as easy as... End of explanation """ result = 2 + 3 #now we print the result print(result) # by the way, I'm a comment. I'm not executed # every line of code following the sign # is ignored: # print("I'm line n. 3: do you see me?") # see? You don't see me... print("I'm line nr. 5 and you DO see me!") """ Explanation: Variables and data types Here we go! we've written our first line of code... But I guess we want to do something a little more interesting, right? Well, for a start, we might want to use Python to execute some operation (say: sum two numbers like 2 and 3) and process the result to print it on the screen, process it, and reuse it as many time as we want... Variables is what we use to store values. Think of it as a shoebox where you place your content; next time you need that content (i.e. the result of a previous operation, or for example some input you've read from a file) you simply call the shoebox name... End of explanation """ result + 5 """ Explanation: That's it! As easy as that (yes, in some programming languages you have to create or declare the variable first and then use it to fill the shoebox; in Python, you go ahead and simply use it!) Now, what do you think we will get when we execute the following code? End of explanation """ type("I am the α and the ω!") type(2.7182818284590452353602874713527) type(True) result = "hello" type(results) """ Explanation: What types of values can we put into a variable? What goes into the shoebox? We can start by the members of this list: Integers (-1,0,1,2,3,4...) Strings ("Hello", "s", "Wolfgang Amadeus Mozart", "I am the α and the ω!"...) floats (3.14159; 2.71828...) Booleans (True, False) If you're not sure what type of value you're dealing with, you can use the function type(). Yes, it works with variables too...! End of explanation """ hello = "goodbye" print(hello) print("hello") type("2") """ Explanation: You declare strings with single ('') or double ("") quote: it's totally indifferent! But now two questions: 1. what happens if you forget the quotes? 2. what happens if you put quotes around a number? End of explanation """ "2" + "3" #probably you wanted this... int("2") + int("3") """ Explanation: String, integer, float... Why is that so important? Well, try to sum two strings and see what happens... End of explanation """ a = "interesting!" 
print("not very " + a) """ Explanation: But if we are working with strings, then the "+" sign is used to concatenate the strings: End of explanation """ beatles = ["John", "Paul", "George", "Ringo"] type(beatles) # dictionaries collections of key : value pairs beatles_dictionary = { "john" : "John Lennon" , "paul" : "Paul McCartney", "george" : "George Harrison", "ringo" : "Ringo Starr"} type(beatles_dictionary) """ Explanation: Lists and dictionaries Lists and dictionaries are two very useful types to store whole collections of data End of explanation """ print(beatles[0]) #indexes can be negative! beatles[-1] """ Explanation: (there are also other types of collection, like Tuples and Sets, but we won't talk about them now; read the links if you're interested!) Items in list are accessible using their index. Do remember that indexing starts from 0! End of explanation """ beatles_dictionary["john"] beatles_dictionary[0] """ Explanation: Dictionaries are collections of key : value pairs. You access the value using the key as index End of explanation """ beatles.append("Billy Preston") beatles """ Explanation: There are a bunch of methods that you can apply to list to work with them. You can append items at the end of a list End of explanation """ beatles.index("George") """ Explanation: You can learn the index of an item End of explanation """ beatles.insert(0, "Pete Best") print(beatles.index("George")) beatles """ Explanation: You can insert elements at a predefinite index: End of explanation """ beatles[1:5] """ Explanation: But most importantly, you can slice lists, producing sub-lists by specifying the range of indexes you want: End of explanation """ beatles[5] """ Explanation: Do you notice something strange? Yes, the limit index is not inclusive (i.e. item beatles[5] is not included) End of explanation """ beatles[7] """ Explanation: What happens if you specify an index that is too high? End of explanation """ len(beatles) """ Explanation: How can you know how long a list is? End of explanation """ beatles[len(beatles)] """ Explanation: Do remember that indexing starts at 0, so don't make the mistake of thinking that len(yourlist) will give you the last item of your list! End of explanation """ beatles[len(beatles) -1] """ Explanation: This will work! End of explanation """ bassist = "Paul McCartney" if bassist == "Paul McCartney": print("Paul played bass with the Beatles!") """ Explanation: If-statements Most of the times, what you want to do when you program is to check a value and execute some operation depending on whether the value matches some condition. That's where if statements help! In its easiest form, an If statement is syntactic construction that checks whether a condition is met; if it is some part of code is executed End of explanation """ bassist = "Bill Wyman" if bassist == "Paul McCartney": print("I'm part of the if statement...") print("Paul played bass in the Beatles!") """ Explanation: Mind the indentation very much! This is the essential element in the syntax of the statement End of explanation """ bassist = "" if bassist == "Paul McCartney": print("Paul played bass in the Beatles!") else: print("This guy did not play for the Beatles...") """ Explanation: What happens if the condition is not met? Nothing! The indented code is not executed, because the condition is not met, so lines 4 and 5 are simply skipped. But what happens if we de-indent line 5? Can you guess why this is what happes? 
Most of the time, we need to specify what happens if the conditions are not met End of explanation """ bassist = "Bill" if bassist == "Paul McCartney": print("Paul played bass in the Beatles!") elif bassist == "Bill Wyman": print("Bill Wyman played for the Rolling Stones!") else: print("I don't know what band this guy played for...") """ Explanation: This is the flow: * the condition in line 3 is checked * is it met? * yes: then line 4 is executed * no: then line 6 is executed Or we can specify many different conditions... End of explanation """ for b in beatles: print(b + " was one of the Beatles") """ Explanation: For loops The greatest thing about lists is that thet are iterable, that is you can loop through them. What do we do if we want to apply some line of code to each element in a list? Try with a for loop! A for loop can be paraphrased as: "for each element named x in an iterable (e.g. a list): do some code (e.g. print the value of x)" End of explanation """ beatles = ["John", "Paul", "George", "Ringo"] for b in beatles: if b == "Paul": instrument = "bass" elif b == "John": instrument = "rhythm guitar" elif b == "George": instrument = "lead guitar" elif b == "Ringo": instrument = "drum" print(b + " played " + instrument + " with the Beatles") """ Explanation: Let's break the code down to its parts: * b: an arbitrary name that we give to the variable holding every value in the loop (it could have been any name; b is just very convenient in this case!) * beatles: the list we're iterating through * : as in the if-statements: don't forget the colon! * indent: also, don't forget to indent this code! it's the only thing that is telling python that line 2 is part of the for loop! * line 2: the function that we want to execute for each item in the iterables Now, let's join if statements and for loop to do something nice... End of explanation """ #see? we assign the file-handler to a variable, or we wouldn't be able #to do anything with that! f = open("NOTES.md", "r") """ Explanation: Input and Output One of the most frequent tasks that programmers do is reading data from files, and write some of the output of the programs to a file. In Python (as in many language), we need first to open a file-handler with the appropriate mode in order to process it. Files can be opened in: * read mode ("r") * write mode ("w") * append mode Let's try to read the content of one of the txt files of our Sunoikisis directory First, we open the file handler in read mode: End of explanation """ for l in f: print(l) """ Explanation: note that "r" is optional: read is the default mode! Now there are a bunch of things we can do: * read the full content in one variable with this code: content = f.read() read the lines in a list of lines: lines = f.readlines() or, which is the easiest, simply read the content one line at the time with a for loop; the f object is iterable, so this is as easy as: End of explanation """ f.close() #all together f = open("NOTES.md") for l in f: print(l) f.close() """ Explanation: Once you're done, don't forget to close the handle: End of explanation """ with open("NOTES.md") as f: #mind the indent! for l in f: #double indent, of course! 
print(l) """ Explanation: Now, there's a shortcut statement, which you'll often see and is very convenient, because it takes care of opening, closing and cleaning up the mess, in case there's some error: End of explanation """ out = open("test.txt", "w") #the file is now open; let's write something in it out.write("This is a test!\nThis is a second line (separated with a new-line feed)") """ Explanation: Now, how about writing to a file? Let's try to write a simple message on a file; first, we open the handler in write mode End of explanation """ #don't worry if you don't understand this code! #We're simply listing the content of the current directory... import os os.listdir() """ Explanation: The file has been created! Let's check this out End of explanation """ out.close() """ Explanation: But before we can do anything (e.g. open it with your favorite text editor) you have to close the file-handler! End of explanation """ with open("test.txt") as f: print(f.read()) """ Explanation: Let's look at its content End of explanation """ with open("test.txt", "w") as out: out.write("Oooops! new content") """ Explanation: Again, also for writing we can use a with statement, which is very handy. But let's have a look at what happens here, so we understand a bit better why "write mode" must be used carefully! End of explanation """ with open("test.txt") as f: print(f.read()) """ Explanation: Let's have a look at the content of "test.txt" now End of explanation """ with open("test.txt", "a") as out: out.write('''\nAnd this is some additional content. The new content is appended at the bottom of the existing file''') with open("test.txt") as f: print(f.read()) """ Explanation: See? After we opened the file in "write mode" for the second time, all content of the file was erased and replaced with the new content that we wrote!!! So keep in mind: when you open a file in "w" mode: if it doesn't exist, a new file with that name is created if it does exist, it is completely overwritten and all previous content is lost If you want to write content to an existing file without losing its pervious content, you have to open the file with the "a" mode: End of explanation """ def printFileContent(file_name): #the function takes one argument: file_name with open(file_name) as f: print(f.read()) """ Explanation: Functions Above, we have opened a file several times to inspect its content. Each time, we had to type the same code over and over. This is the typical case where you would like to save some typing (and write code that is much easier to maintain!) by defining a function A function is a block of reusable code that can be invoked to perform a definite task. Most often (but not necessarily), it accepts one or more arguments and return a certain value. We have already seen one of the built-in functions of Python: print("some str") But it's actually very easy to define your own. Let's define the function to print out the file content, as we said before. Note that this function takes one argument (the file name) and prints out some text, but doesn't return back any value. End of explanation """ printFileContent("README.md") """ Explanation: As usual, mind the indent! file_name (line 1) is the placeholder that we use in the function for any argument that we want to pass to the function in our real-life reuse of the code. 
Now, if we want to use our function we simply call it with the file name that we want to print out End of explanation """ def sumTwoNumbers(first_int, second_int): s = first_int + second_int return s #could be even shorter: def sumTwoNumbers(first_int, second_int): return first_int + second_int sumTwoNumbers(5, 6) """ Explanation: Now, let's see an example of a function that returns some value to the users. Those functions typically take some argument, process them and yield back the result of this processing. Here's the easiest example possible: a function that takes two numbers as arguments, sum them and returns the result. End of explanation """ s = sumTwoNumbers(5,6) s * 2 """ Explanation: Most often, you want to assign the result returned to a variable, so that you can go on working with the results... End of explanation """ if 1 > 0: print("Well, we know that 1 is bigger than 0!") """ Explanation: Error and exceptions Things can go wrong, especially when you're a beginner. But no panic! Errors and exceptions are actually a good thing! Python gives you detailed reports about what is wrong, so read them carefully and try to figure out what is not right. Once you're getting better, you'll actually learn that you can do something good with the exceptions: you'll learn how to handle them, and to anticipate some of the most common problems that dirty data can face you with... Now, what happens if you forget the all-important syntactic constraint of the code indent? End of explanation """ var = "bla bla" if var1: print("If you see me, then I was defined...") """ Explanation: Pretty clear, isn't it? What you get is an error a construct that is not grammatical in Python's syntax. Note that you're also told where (at what line, and at what point of the code) your error is occurring. That is not always perfect (there are cases where the problem is actually occuring before what Python thinks), but in this case it's pretty OK. What if you forget to define a variable (or you misspell the name of a variable)? End of explanation """ printFileContent("file_that_is_not_there.txt") """ Explanation: You get an exception! The syntax of your code is right, but the execution met with a problem that caused the program to stop. Now, in your program, you can handle selected exception: this means that you can write your code in a way that the program would still be executed even if a certain exception is raised. Let's see what happens if we use our function to try to print the content of a file that doesn't exist: End of explanation """ def printFileContent(file_name): #the function takes one argument: file_name try: with open(file_name) as f: print(f.read()) except FileNotFoundError: print("The file does not exist.\nNevertheless, I do like you, and I will print something to you anyway...") printFileContent("file_that_doesnt_exist.txt") """ Explanation: We get a FileNotFoundError! Now, let's re-write the function so that this event (somebody uses the function with a wrong file name) is taken care of... End of explanation """
Xero-Hige/Notebooks
Algoritmos I/2018-1C/clase-23-03-2018.ipynb
gpl-3.0
def imprimir_fichas_domino(): ''' Imprime las fichas del dominó. ''' for i in range(7): for j in range(i,7): print(i,"/",j,end=" | ") print() def main(): imprimir_fichas_domino() main() """ Explanation: Práctica Alan - Clase del 23/03/2018 Ejercicio 2.7 Escribir un programa que imprima por pantalla todas las fichas de dominó, de una por línea y sin repetir. End of explanation """ def imprimir_fichas_domino(n=6): ''' Imprime las fichas de un juego de dominó con números entre el 0 y n. ''' for i in range(n+1): for j in range(i,n+1): print(i,"/",j,end=" | ") print() def main(): imprimir_fichas_domino(8) main() """ Explanation: Ejercicio 2.8 Modificar el programa anterior para que pueda generar fichas de un juego que puede tener números de 0 a n. End of explanation """ def obtener_producto_maximo(a, b, c, d): ''' Dados cuatro números enteros a, b, c y d, devuelve el producto máximo entre dos de ellos. ''' # Versión con acumulador maximo=max(a*b,a*c) maximo=max(maximo,a*d) maximo=max(maximo,b*c) maximo=max(maximo,b*d) maximo=max(maximo,c*d) return maximo def obtener_producto_maximo_pythonic(a, b, c, d): # Versión python return max(a*b, a*c, a*d, b*c, b*d, c*d) print( "Obtener producto maximo version multilang:", obtener_producto_maximo(2, 3, 4, -1) ) print( "Obtener producto maximo version Python:", obtener_producto_maximo_pythonic(2, 3, 4, -1) ) """ Explanation: Notas de la clase: - El parámetro n = 6 es un parámetro "por defecto" y nos da la libertad de pasarle o no el parámetro a la función. Si el parámetro no se indica, entonces la función hace de cuenta que se le pasó el valor por defecto. Ejercicio 3.3 Escribir una función que, dados cuatro números, devuelva el mayor producto de dos de ellos. Por ejemplo, si recibe los números 1, 5, -2, -4 debe devolver 8, que es el producto más grande que se puede obtener entre ellos (8 = −2 × −4). End of explanation """ Image(filename='./clase-23-03-2018_image/max_pairs.jpg') """ Explanation: Notas de la clase: - En la mayoría de los lenguajes la función max sólo admite dos parámetros. - En Python la función max puede permitir más de dos parámetros con lo cual se puede resolver con una sola llamada a max. Notas adicionales (Razonamiento de la clase) Para pensar este ejercicio, hay que primero notar que nadie internamente hace el maximo de todos contra todos, sino, eventualmente de a pares: End of explanation """ Image(filename='./clase-23-03-2018_image/max_univalue.jpg') """ Explanation: Aunque la forma mas razonable, es no estar guardando muchos resultados intermedios cuando los podemos descartar porque ya sabemos que no sirven: End of explanation """ def imprimir_matriz_identidad(n): ''' Imprime por pantalla una matriz identidad de NxN. ''' for i in range(n): for j in range(n): if i == j: print('1', end=' ') else: print('0', end=' ') print() imprimir_matriz_identidad(3) """ Explanation: Notar que en ambos casos se hace el mismo numero de comparaciones, pero para codificar uno u otro se requieren distinto numero de variables. Mas aun, el segundo modo, permite generalizarlo facilmente para poder utilizarlo dentro de un ciclo, como se hace en un ejercicio mas adelante. Ejercicio 4.3 Escribir una función que reciba por parámetro una dimensión n, e imprima la matriz identidad correspondiente a esa dimensión. End of explanation """ def es_par(n): ''' Devuelve True si el número n es par. 
''' return (n % 2 == 0) print('¿Es par 19?', es_par(19)) print('¿Es par 12?', es_par(12)) """ Explanation: Ejercicio 4.1.a Escribir una función que dado un número entero n devuelva si es par o no. End of explanation """ def es_primo(n): ''' Devuelve True si el número n es primo. ''' # Versión con acumulador _es_primo = True for m in range(2,n): _es_primo = _es_primo and (n%m != 0) return _es_primo def es_primo2(n): ''' Devuelve True si el número n es primo. ''' # Versión sin acumulador for m in range(2,n): if n % m == 0: return False return True print('¿Es primo 27?:', es_primo(27)) print('¿Es primo 31?:', es_primo(31)) """ Explanation: Notas de la clase: - No es necesario utilizar un if cuando se va a devolver el valor booleano resultante. La expresión (n % 2 == 0) ya da como resultado un valor de verdad, por lo tanto podemos devolverlo directamente. Ejercicio 4.1.b Escribir una función que dado un número entero n devuelva si es primo o no. End of explanation """ Image(filename='./clase-23-03-2018_image/and_univalue.jpg') """ Explanation: Notas de la clase: - A diferencia del caso anterior, acá no podemos devolver directamente la condición. Esto se debe a que uno de los valores de verdad nos indica el fin de la función pero el otro no. Es decir, si n % m es 0, estamos en condiciones de afirmar que el número no es primo y por lo tanto podemos devolver False directamente. Pero si n % m es distinto de 0, entonces todavia no sabemos nada y tenemos que seguir iterando. End of explanation """
simpeg/simpegdc
notebooks/DC_schumberger_FWD.ipynb
mit
cs = 25. npad = 11 hx = [(cs,npad, -1.3),(cs,41),(cs,npad, 1.3)] hy = [(cs,npad, -1.3),(cs,17),(cs,npad, 1.3)] hz = [(cs,npad, -1.3),(cs,20)] mesh = Mesh.TensorMesh([hx, hy, hz], 'CCN') mesh.plotGrid() """ Explanation: DC Forward Modeling of Schlumber array Here we test the accuracy of DC forward modeling using analytic solution. Step1: Generate mesh End of explanation """ sighalf = 1e-2 sigma = np.ones(mesh.nC)*sighalf """ Explanation: Step2: Generating model End of explanation """ matplotlib.rcParams.update({'font.size': 14, 'text.usetex': True, 'font.family': 'arial'}) ntx = 16 xtemp_txP = np.arange(ntx)*(25.)-500. xtemp_txN = -xtemp_txP ytemp_tx = np.zeros(ntx) xtemp_rxP = -50. xtemp_rxN = 50. ytemp_rx = 0. abhalf = abs(xtemp_txP-xtemp_txN)*0.5 a = xtemp_rxN-xtemp_rxP b = ((xtemp_txN-xtemp_txP)-a)*0.5 fig, ax = plt.subplots(1,1, figsize = (12,3)) for i in range(ntx): ax.plot(np.r_[xtemp_txP[i], xtemp_txP[i]], np.r_[0., 0.4-0.01*(i-1)], 'k-', lw = 1) ax.plot(np.r_[xtemp_txN[i], xtemp_txN[i]], np.r_[0., 0.4-0.01*(i-1)], 'k-', lw = 1) ax.plot(xtemp_txP[i], ytemp_tx[i], 'bo') ax.plot(xtemp_txN[i], ytemp_tx[i], 'ro') ax.plot(np.r_[xtemp_txP[i], xtemp_txN[i]], np.r_[0.4-0.01*(i-1), 0.4-0.01*(i-1)], 'k-', lw = 1) ax.plot(np.r_[xtemp_rxP, xtemp_rxP], np.r_[0., 0.2], 'k-', lw = 1) ax.plot(np.r_[xtemp_rxN, xtemp_rxN], np.r_[0., 0.2], 'k-', lw = 1) ax.plot(xtemp_rxP, ytemp_rx, 'ko') ax.plot(xtemp_rxN, ytemp_rx, 'go') ax.plot(np.r_[xtemp_rxP, xtemp_rxN], np.r_[0.2, 0.2], 'k-', lw = 1) ax.grid(True) ax.set_ylim(-0.2,0.6) ax.set_xlim(-600,600) fig, ax = plt.subplots(1,1, figsize = (6,4)) ax.plot(xtemp_txP, ytemp_tx, 'bo') ax.plot(xtemp_txN, ytemp_tx, 'ro') ax.plot(xtemp_rxP, ytemp_rx, 'ko') ax.plot(xtemp_rxN, ytemp_rx, 'go') ax.legend(('A (C+)', 'B (C-)', 'M (P+)', 'N (C-)'), fontsize = 14) mesh.plotSlice(sigma, grid=True, ax = ax, pcolorOpts={'cmap':'binary'}) ax.set_xlim(-600, 600) ax.set_ylim(-200, 200) ax.set_title('Survey geometry (Plan view)') ax.set_xlabel('x (m)') ax.set_ylabel('y (m)') ax.text(-600, 210, '(a)', fontsize = 16) # fig.savefig('DCsurvey.png', dpi = 200) txlist = [] rx = DC.RxDipole(np.r_[xtemp_rxP, ytemp_rx, -12.5], np.r_[xtemp_rxN, ytemp_rx, -12.5]) for i in range(ntx): tx = DC.SrcDipole([rx], [xtemp_txP[i], ytemp_tx[i], -12.5],[xtemp_txN[i], ytemp_tx[i], -12.5]) txlist.append(tx) survey = DC.SurveyDC(txlist) problem = DC.ProblemDC_CC(mesh) problem.pair(survey) try: from pymatsolver import MumpsSolver problem.Solver = MumpsSolver except Exception, e: problem.Solver = SolverLU """ Explanation: Step3: Design survey: Schulumberger array <img src="http://www.landrinstruments.com/_/rsrc/1271695892678/home/ultra-minires/additional-information-1/schlumberger-soundings/schlum%20array.JPG"> </img> $$ \rho_a = \frac{V}{I}\pi\frac{b(b+a)}{a}$$ Let $b=na$, then we rewrite above equation as: $$ \rho_a = \frac{V}{I}\pi na(n+1)$$ Since AB/2 can be a good measure for depth of investigation, we express $$AB/2 = \frac{(2n+1)a}{2}$$ End of explanation """ %%time data = survey.dpred(sigma) """ Explanation: Step4: Run DC forward modeling End of explanation """ appres = data*np.pi*b*(b+a)/a fig, ax = plt.subplots(1,1, figsize = (6, 4)) ax.semilogx(np.r_[100., 500.], np.r_[100., 100.], 'k--') ax.semilogx(abhalf, appres, 'k.-') ax.set_ylim(90., 120.) 
ax.set_xscale('log') ax.set_xlabel('AB/2') ax.set_ylabel('Apparent resistivity ($\Omega m$)') ax.grid(True) ax.text(100, 122, '(b)', fontsize = 16) ax.legend(('True', 'simpegDC'), loc = 1, fontsize = 14) # fig.savefig('comp_dc.png') """ Explanation: $$ \rho_a = \frac{V}{I}\pi\frac{b(b+a)}{a}$$ End of explanation """
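"""
Explanation: Added illustrative sketch, not part of the original notebook: the apparent-resistivity conversion used above can be wrapped in a small helper so the Schlumberger formula $\rho_a = \frac{V}{I}\pi\frac{b(b+a)}{a}$ is reusable. The name apparent_resistivity_schlumberger is invented for this example and is not a SimPEG routine; it assumes v_over_i plays the role of the measured V/I, with a and b defined as in the survey geometry above.
End of explanation
"""

import numpy as np

def apparent_resistivity_schlumberger(v_over_i, a, b):
    # Illustrative helper (assumption: v_over_i is the measured potential per unit current).
    # a: potential-electrode (MN) separation; b: gap between a current electrode and the
    # nearest potential electrode, as in the geometry plotted above.
    return v_over_i * np.pi * b * (b + a) / a

# Sketch of intended use with the quantities computed above (assumed in scope):
# appres_check = apparent_resistivity_schlumberger(data, a, b)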
jstac/quantecon_nyu_2016
lecture14/james_graham_DOLO.ipynb
bsd-3-clause
from dolo import * import numpy as np import matplotlib.pyplot as plt filename = ('https://raw.githubusercontent.com/EconForge/dolo/master/examples/models/rbc.yaml') pcat(filename) # Print the model file """ Explanation: Introducing DOLO What is DOLO? A Python-based language to write and solve a variety of economic models (often dynamic, often stochastic). It uses a simple classification syntax written in YAML (Yet Another Markup Language) to express model objects, which can then be solved in a Python console or Jupyter Notebook. Up front, one of the biggest advantages of DOLO is that it can serve as a straightforward replacement of Dynare for solving DSGE models. We'll talk about this advantage a lot as we go on. Jumping right in... DOLO can be installed very easily with instructions from the website (http://dolo.readthedocs.io/en/doc/) If you have Anaconda, you can quickly install Dolo with: * pip install dolo Website provides a good intro to the Dolo package, but for further information you'll often have to go looking through the code yourself. Most of the algorithms we'll be using can be found in, for example: * C:\Users\James\Anaconda3\Lib\site-packages\dolo\algos * For example, dtcscc holds scripts that solve "Discrete Transition Continuous State Continuous Controls" models An RBC model example Let's begin by importing DOLO and an example RBC model file from EconForge. We then print the model to see what's inside. End of explanation """ model = yaml_import(filename) """ Explanation: The first thing we'll want to do is read, import, and check the steady state of the model object. Doing this with yaml_import, we'll be able to see what "kind" of model has been imported. In this case we have a Discrete Transiton Continuous State Continuous Controls model. Note that we do not need to include the steady state equations in the model file, these can be computed later. End of explanation """ print(model) """ Explanation: Let's take a look at the residuals from the solution for the steady state of the model: End of explanation """ dr_pert = approximate_controls(model, order=1) """ Explanation: Approximate model solutions We can solve models with several DOLO methods. * Pertubation methods: * Only first order for me; I seem to be missing a package for higher orders * Global solutions: * Works very well * Dynare: * Looks like it is still under development Let's first use a first order pertubation method (i.e. a linear approximation) to solve the model using the approximate_controls function. This will use a Schur/QZ decomposition method, which is the same as in (Matlab's) Dynare. We will use the dr_ prefix to denote a solved decision rule object. One nice thing about DOLO is that the solved decision rules are much nicer to work with than in (Matlab's) Dynare. End of explanation """ dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[3,3]) dr_global_smol = time_iteration(model, pert_order=1, verbose=False, interp_type="smolyak", smolyak_order=3) """ Explanation: Next, let's find a global solution to the model. The elements of the code are as follows: * time_iteration: is a function that finds a global solution using backward time-iteration. The algorithm iterates on the residuals of the arbitrage equations, in this case the Euler equation and labor supply equation. interp_type lets us use either smolyak interpolation or polynomial spline interpolation for the policy functions. 
For splines: interp_type sets the order of the polynomial interpolation in each state/dimension. For Smolyak: smolyak_order sets the order of Smolyak interpolation. pert_order: determines the pertubuation order of the model solution which is given as an initial policy. For example, setting equal to one begins with a policy that comes from linear approximation. (Not clear what happens if the model has inequality constraints...) verbose: setting to true makes the number of iterations visible. Let's solve using both spline and Smolyak methods. In each case, we'll use 3rd order polynomials. End of explanation """ %matplotlib inline # Get bounds for the approximations spl_bounds = [numpy.min(dr_global_spl.grid[:,1]), numpy.max(dr_global_spl.grid[:,1])] smol_bounds = [numpy.min(dr_global_smol.grid[:,1]), numpy.max(dr_global_smol.grid[:,1])] plt.figure(figsize=(17, 7)) plt.subplot(121) plot_decision_rule(model, dr_global_spl, 'k', 'i', label='Global: spline', bounds=spl_bounds, linewidth=3, alpha=0.5,color='r') plot_decision_rule(model, dr_global_smol, 'k', 'i', label='Global: Smolyak', bounds=spl_bounds, linewidth=3, alpha=0.5,color='b') plot_decision_rule(model, dr_pert, 'k', 'i', label='Linear perturbation', bounds=spl_bounds, linewidth=3, alpha=0.5,color='g') plt.ylabel('i') plt.title('Investment') plt.legend() plt.grid() plt.subplot(122) plot_decision_rule(model, dr_global_spl, 'k', 'n', label='Global: spline', bounds=spl_bounds, linewidth=3, alpha=0.5,color='r') plot_decision_rule(model, dr_global_smol, 'k', 'n', label='Global: Smolyak', bounds=spl_bounds, linewidth=3, alpha=0.5,color='b') plot_decision_rule(model, dr_pert, 'k', 'n', label='Linear perturbation', bounds=spl_bounds, linewidth=3, alpha=0.5,color='g') plt.ylabel('n') plt.title('Labour') plt.legend() plt.grid() plt.show() """ Explanation: Plotting decision rules Next, let's plot the three kinds of solution: linear approximation, cubic spline approximation, and third order Smolyak approximation. We look at the decision rules for investment and labor supply. Note how much easier it is to observe decision rules than in Dynare. The key command is plot_decision_rule, which takes the decision rule object as an input (and other standard plot inputs). End of explanation """ original_delta=model.calibration_dict['delta'] drs = [] delta_values = np.linspace(0.01, 0.04,5) for val in delta_values: model.set_calibration(delta=val) # Change calibration drs.append(approximate_controls(model, order=1)) # appending another model object to the list plt.figure(figsize=(17, 7)) # Plot investment decision rules plt.subplot(121) for i,dr in enumerate(drs): plot_decision_rule(model, dr, 'k', 'i', label='$\delta={}$'.format(delta_values[i]), bounds=spl_bounds) plt.ylabel('i') plt.title('Investment') plt.grid() # Plot labor decision rules plt.subplot(122) for i,dr in enumerate(drs): plot_decision_rule(model, dr, 'k', 'n', label='$\delta={}$'.format(delta_values[i]), bounds=spl_bounds) plt.ylabel('n') plt.title('Labour') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.grid() plt.show() # Reset model back to the original calibration model.set_calibration(delta=original_delta) """ Explanation: Comparative statics Next, let's carry out a comparative statics exercise by changing the value of the depreciation rate $\delta$. We'll just use the linearized model for this. First, we'll create a set of linearized models, each solved with a different value of delta. 
We put these models into a list object, and then call each of them when plotting the associated decision rules. The model.set_calibration command lets us change the calibration of the model object. We then append each solved approximation to a list of decision rules, drs.
End of explanation
"""

# Reload the model with the original calibration
model = yaml_import(filename)
dr_pert = approximate_controls(model, order=1)
dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[3,3])
"""
Explanation: Simulations/Impulse response functions Now we turn to simulating the model. From now on we'll just deal with the linear and spline approximations. First, reload the model with the original calibration.
End of explanation
"""

s0 = model.calibration['states']
sigma2_ez = model.covariances
print(str(model.symbols['states'])+'='+str(s0)) # Print the steady state values of each state variable
"""
Explanation: Now, we start the simulation at the model steady state, and then get the model shocks. The model can be simulated easily using the simulate function. This is very similar to Dynare's stoch_simul command: it both simulates and creates IRFs. Its inputs are: * model: our RBC model object * dr_: the solved decision rule of choice * s1: the position from which all simulations start, in this case the date of the shock * n_exp: the number of simulations (set to 0 to produce IRFs) * horizon: the number of simulated periods The simulate command then returns a Pandas table of size $horizon \times numvar$. 
End of explanation """ plt.figure(figsize=(15, 7)) plt.subplot(221) plt.plot(irf_glob['z'],linewidth=2, alpha=0.75,color='b') plt.title('Productivity') plt.grid() plt.subplot(222) plt.plot(irf_glob['i'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['i'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Investment') plt.legend() plt.grid() plt.subplot(223) plt.plot(irf_glob['n'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['n'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Labour') plt.legend() plt.grid() plt.subplot(224) plt.plot(irf_glob['c'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['c'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Consumption') plt.legend() plt.grid() plt.show() """ Explanation: Now let's plot the impulse responses to a 5% shock to productivity: End of explanation """ s1 = s0.copy() # Copy steady states s1[0] *= 1.2 # Set size of shock to 5% larger than steady state value irf_glob = simulate(model, dr_global_spl, s1, n_exp=0, horizon=40 ) # Simulate spline model irf_pert = simulate(model, dr_pert, s1, n_exp=0, horizon=40 ) # Simulate linear model plt.figure(figsize=(15, 7)) plt.subplot(221) plt.plot(irf_glob['z'],linewidth=2, alpha=0.75,color='b') plt.title('Productivity') plt.grid() plt.subplot(222) plt.plot(irf_glob['i'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['i'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Investment') plt.legend() plt.grid() plt.subplot(223) plt.plot(irf_glob['n'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['n'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Labour') plt.legend() plt.grid() plt.subplot(224) plt.plot(irf_glob['c'],linewidth=2, alpha=0.75,color='b',label='Spline') plt.plot(irf_pert['c'],linewidth=2, alpha=0.75,color='r',label='Linear') plt.title('Consumption') plt.legend() plt.grid() plt.show() """ Explanation: Now let's try again with a 20% shock. We can see how much different the spline and linear approximations are in the face of large shocks: End of explanation """ # Reload the model with the original calibration model = yaml_import(filename) # dr_pert = approximate_controls(model, order=1) dr_global_spl = time_iteration(model, pert_order=1, verbose=False, interp_type="spline", interp_orders=[4,4]) from dolo.algos.dtcscc.accuracy import omega err_pert = omega(model, dr_pert) print("Linear approximation\n") print(err_pert) print("--------------\n") err_spl = omega(model, dr_global_spl) print("Cubic spline approximation\n") print(err_spl) print("--------------\n") err_smol = omega(model, dr_global_smol) print("Smolyak approximation\n") print(err_smol) """ Explanation: Approximation errors We can compute the approximation errors for the optimality conditions: $$EulerError = 1 - \beta E \left[ \left( \frac{C_{t+1}}{C_t} \right)^{\sigma}( 1- \delta + r_{k,t+1} ) \right] $$ $$LaborSupplyError = w_t - \chi n_t^{\eta} C_t^{\sigma} $$ First, let's look at the maximum and mean errors (i.e. using the ergodic distribution of the model) over the state space for each of the approximations. We use the omega function to do this. We then print the errors for each equation. 
End of explanation """ a = err_pert['domain'].a b = err_pert['domain'].b orders = err_pert['domain'].orders errors = np.concatenate((err_pert['errors'].reshape( orders.tolist()+[-1] ), err_spl['errors'].reshape( orders.tolist()+[-1] ), err_smol['errors'].reshape( orders.tolist()+[-1] )), 2) plt.figure(figsize=(15,8)) titles=["Investment demand pertubation errors", "Labor supply pertubation errors", "Investment demand spline errors", "Labor supply spline errors", "Investment demand Smolyak errors", "Labor supply Smolyak errors"] for i in range(6): plt.subplot(3,2,i+1) imgplot = plt.imshow(errors[:,:,i], origin='lower', extent=( a[0], b[0], a[1], b[1]), aspect='auto') imgplot.set_clim(0,3e-4) plt.colorbar() plt.xlabel('z') plt.ylabel('k') plt.title(titles[i]) plt.tight_layout() """ Explanation: We can also visualize the errors over the state space: End of explanation """
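"""
Explanation: Added illustrative sketch, not part of the original lecture notebook: approximation errors such as the Euler-equation residuals above are conventionally reported in base-10 logarithms (an error of 1e-4 is quoted as -4). The helper below is a generic NumPy summary under the assumption that err_pert['errors'], err_spl['errors'] and err_smol['errors'] are arrays of nonnegative residuals, as the plots above suggest; the name log10_error_summary is invented for this example.
End of explanation
"""

import numpy as np

def log10_error_summary(errors, floor=1e-16):
    # Summarize residuals in base-10 log units; the floor avoids log10(0).
    logs = np.log10(np.maximum(np.abs(errors), floor))
    return {'max_log10': float(logs.max()), 'mean_log10': float(logs.mean())}

# Sketch of intended use with the objects computed above (assumed in scope):
# print(log10_error_summary(err_spl['errors']))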
thom056/ada-parliament-ML
02-NLP_Sentiment/02-MLOnVotation.ipynb
gpl-2.0
import pandas as pd import glob import os import numpy as np from time import time import logging import gensim import bz2 import re from stop_words import get_stop_words """ Explanation: 02. Machine Learning on the Votations What we aim to perform now is predict the topics that are treated in a Vote, given the short string description of the title of the law (BillTitle) and the BusinessTitle. We apply our model, saved at ../datas/lda/ldamodel to the data from the Voting field, in order to prepare it for the machine learning we'll do later on. 0. Usual Imports End of explanation """ def getTopicForQuery (question,stoplist,dictionary,lda): """ Returns the topic probability distribution for a given input question, filtering with the stoplist and finding the matches in the dictionary of words we have from our topic modelling algorithm. @param question : The string from which we want to extract the topic @param stoplist : The list of common words for the language, that we want to exclude @param dictionary : The dictionary of all the words we find for a given lda model (associated to lda) @param lda : the model of lda (Latent Dirichlet Allocation) that we want to model the topics from. @return the topic probability distribution for the given question """ # 1. Question -> Lower case -> Split -> Exclude common words temp = question.lower() words = re.findall(r'\w+', temp, flags = re.UNICODE | re.LOCALE) important_words = [] important_words = filter(lambda x: x not in stoplist, words) # 2. Find matches in the dictionary of words and get the topics ques_vec = [] ques_vec = dictionary.doc2bow(important_words) return ldamodel.get_document_topics(ques_vec,minimum_probability=0) """ Explanation: 1. initialisation of function for topic determination First of all, we define a function, getTopicForQuery in order to obtain the topics probability dsistribution for the lda model we're currently using. It will be of use mostly to retrieve the topic probability distribution for the attributes from the merged BillTitle and BusinessTitle. End of explanation """ stop_words_de = get_stop_words('de') with open ("../datas/stop_dictionaries/French_stop_words_changed.txt", "r") as myfile: stop_words=myfile.read() stop_words = stop_words.split(',') stop_words = stop_words_de+stop_words ldamodel = gensim.models.LdaModel.load('../datas/lda/ldamodelFR.model', mmap='r') """ Explanation: Now we load the lda model we use along with the stop words, in order to have them available for the time we will use them, avoiding to reload them every time we call getTopicForQuery. We also load our lda model for once. End of explanation """ dataset_tmp = [] path = '../datas/scrap/Voting' allFiles = glob.glob(os.path.join(path, 'Session*.csv')) for file_ in allFiles: print(file_) data_tmp = pd.read_csv(file_) dataset_tmp += [data_tmp] data_frame = pd.concat(dataset_tmp) """ Explanation: 2. Creation of the Voting DataFrame We load the Voting DataFrame, take only the relevant fields for us and add the topic probability distribution before exporting it. It will be ready for our Machine Learning algorithm later on. 
End of explanation
"""

parl = data_frame.ParlGroupCode.unique().tolist()
#for group in parl :
#    data_frame.loc[data_frame.ParlGroupCode==group,'ParlGroupCode']= parl.index(group)
data_frame.head()

votation_frame = data_frame#[['BillTitle','BusinessTitle','FirstName','LastName','Decision','ParlGroupCode','VoteEnd']]
votation_frame = votation_frame.fillna(value='')
votation_frame['text'] = votation_frame['BillTitle']+' '+votation_frame['BusinessTitle']
"""
Explanation: 2.1 Selection of the interesting fields in data_frame We keep only the fields that are relevant to us, that is - BillTitle : the name of the voted law - BusinessTitle : the description of what is discussed in parliament - FirstName and LastName : the name of the person voting - Decision : the vote of the person - text : a field which combines BillTitle and BusinessTitle; we will get a better NLP clustering by using this field with our LDA model.
End of explanation
"""

text_dict = {'text': votation_frame.text.unique()}
topic_learning_frame = pd.DataFrame(text_dict)
topic_learning_frame.head()
"""
Explanation: We create a smaller DataFrame which contains only the subjects that are voted on, so that we do not repeat the text for each person who votes. Hence we perform the NLP only once on each unique entry.
End of explanation
"""

def insert_topic(data_frame):
    dict_ = {}
    dict_['text'] = data_frame['text'].values
    with open("../datas/lda/topics.txt", "r") as myfile:
        s = myfile.read()
    topics = s.split('\n')
    topics_dic = {}
    for topic in topics:
        if len(topic) > 1:
            name = topic.split(':')
            topics_dic[name[0]] = name[1]
    dictionary = gensim.corpora.Dictionary.load('../datas/lda/ldaDictionaryFR.dict')
    for index, text in zip(data_frame.index, data_frame['text'].values):
        if index % 1000 == 0:
            print(index)
        for topic in getTopicForQuery(text, stop_words, dictionary, ldamodel):
            if (topics_dic[str(topic[0])]) in dict_:
                dict_[topics_dic[str(topic[0])]] += [topic[1]]
            else:
                dict_[topics_dic[str(topic[0])]] = [topic[1]]
    return dict_

if not os.path.exists("../datas/nlp_results"):
    os.makedirs("../datas/nlp_results")
"""
Explanation: 3. Topic Clustering We first define the function insert_topic, which creates a dictionary containing the topic distribution for every text in a data_frame. 
End of explanation """ from yandex_translate import YandexTranslate translate = YandexTranslate('trnsl.1.1.20161208T132730Z.fe490b34d7db4e4f.0a4c7781a0273d520073a1550b6a6624c1c3fd0a') text_eng = [] for text in topic_learning_frame.text: s = translate.translate(text, 'fr-en') text_eng += [s['text'][0]] topics_dict = insert_topic(topic_learning_frame) topics_frame = pd.DataFrame(topics_dict) #topics_frame['text_eng'] = text_eng topics_frame.head(5) #topics_frame.to_csv('../datas/nlp_results/voting_with_topics_unique.csv',index=False) """ Explanation: We now create a frame using the topics found using insert_topic End of explanation """ (pd.merge(votation_frame,topics_frame)).to_csv('../datas/nlp_results/voting_with_topics.csv',index=False) def insert_topic_unique(data_frame) : dict_ = {} dict_['text'] =data_frame['text'].values dict_['Topic'] = [] with open ("../datas/lda/topics.txt", "r") as myfile: s=myfile.read() topics = s.split('\n') topics_dic = {} for topic in topics : if len(topic)>1 : name = topic.split(':') topics_dic[name[0]] = name[1] dictionary = gensim.corpora.Dictionary.load('../datas/lda/ldaDictionary') for index, text in zip(data_frame.index,data_frame['text'].values) : if index%1000 == 0 : print(index) max_ = 0 for topic in getTopicForQuery(text,stop_words,dictionary,ldamodel) : max_old = max_ max_ = max(max_,topic[1]) if max_old != max_ : topic_id = topic[0] dict_['Topic'] += [topics_dic[str(topic_id)]] return dict_ topics_s = insert_topic_unique(topic_learning_frame) topics_ss = pd.DataFrame(topics_s) topics_ss.head() topics_ss.to_csv("../datas/nlp_results/voting_single_topic.csv") data_complete = (pd.merge(votation_frame,topics_ss)) data_complete.head(2) data_complete.VoteEnd = [x[:10] for x in data_complete.VoteEnd] data_complete.head(2) not_used = ['Unnamed: 0','BusinessNumber','BusinessShortNumber','CantonID','ID','IdLegislativePeriod', 'IdSession','IdVote','PersonNumber','RegistrationNumber','BillTitle','BusinessTitle','DecisionText', 'Language','MeaningNo','MeaningYes','ParlGroupColour','ParlGroupCode','ParlGroupNameAbbreviation', 'Subject','text'] data_used = data_complete.drop(not_used ,axis=1) #data_used.head() data_mean = data_used.set_index(['VoteEnd','Topic','FirstName']) data_mean.head(1000) data_used.to_csv("../datas/nlp_results/voting_single_topic_not_unique.csv") for year in data_mean.index.get_level_values('VoteEnd').unique() : for Topic in data_mean.loc[year].index.get_level_values('Topic').unique() : print("the {0} the subject : {1} had a result of {2}".format( year,Topic,data_mean.loc[year,Topic].Decision.mean())) """ Explanation: Finally merging the topics with the original frame containing the name and decision of parlementeer ? End of explanation """
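"""
Explanation: Added illustrative sketch, not from the original notebook: the nested loops above, which print the mean Decision per voting date and topic, can usually be replaced by a single pandas groupby. The snippet assumes data_used still holds the VoteEnd, Topic and Decision columns built above.
End of explanation
"""

# Equivalent per-date, per-topic average with a single groupby (illustrative only):
mean_decision = data_used.groupby(['VoteEnd', 'Topic'])['Decision'].mean()
print(mean_decision.head())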
rahulremanan/python_tutorial
Hacker_Rank/03-Strings/11_Find_a_string.ipynb
mit
s = 'ABCD' for i in range(0, len(s)): print (s[i]) string = 'ABCABDEABCF' sub_string = 'ABC' string[5:7] def output_substring(string, sub_string): for i in range(0, len(string)-len(sub_string)+1): n = i print (string[n:(n+len(sub_string))]) output_substring(string, sub_string) """ Explanation: Find a string: In this challenge, the user enters a string and a substring. You have to print the number of times that the substring occurs in the given string. String traversal will take place from left to right, not from right to left. NOTE: String letters are case-sensitive. Input Format: The first line of input contains the original string. The next line contains the substring. Constraints: 1 ≤ len(string) ≤ 200 Each character in the string is an ascii character. Output Format: Output the integer number indicating the total number of occurrences of the substring in the original string. Sample Input: ABCDCDC CDC Sample Output: 2 Concept: Some string processing examples, such as these, might be useful. There are a couple of new concepts: In Python, the length of a string is found by the function len(s), where s is the string. To traverse through the length of a string, use a for loop: for i in range(0, len(s)):print (s[i]) A range function is used to loop over some length: range (0, 5) Here, the range loops over 0 to 4 and 5 is excluded. Workbook: End of explanation """ def count_substring(string, sub_string): count = 0 for i in range(0, len(string)-len(sub_string)+1): if (string[i:(i+len(sub_string))]) == sub_string: count+=1 return count count_substring(string, sub_string) """ Explanation: Implement a basic sub-string counter: End of explanation """ try: string.encode('ascii') print ('This string is ascii ...') except: print ('This string is not ascii encoded ...') """ Explanation: Check if a string is ascii encoded: End of explanation """ def count_substring(string, sub_string, verbose = False, min_string_len =1, max_string_len=200): ascii_encoding = False try: string.encode('ascii') sub_string.encode('ascii') ascii_encoding = True if verbose: print ('This string is ascii ...') except: if verbose: print ('This string is not ascii encoded ...') if len(string)>= min_string_len and len(string)<=max_string_len and ascii_encoding: count = 0 for i in range(0, len(string)-len(sub_string)+1): if (string[i:(i+len(sub_string))]) == sub_string: count+=1 return count count_substring(string, sub_string) """ Explanation: Solution: End of explanation """
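"""
Explanation: Added illustrative sketch, not part of the original solution: the same overlapping count can be obtained with str.find by advancing the search position one character past each match. The name count_substring_find is invented for this variant; it is shown only as an alternative to the slicing approach above.
End of explanation
"""

def count_substring_find(string, sub_string):
    # Count overlapping occurrences of sub_string by repeatedly calling str.find.
    count = 0
    start = string.find(sub_string)
    while start != -1:
        count += 1
        start = string.find(sub_string, start + 1)
    return count

print(count_substring_find('ABCDCDC', 'CDC'))  # 2, matching the sample case above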
RajeshThevar/Image-Classification
image-classification/dlnd_image_classification.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' # Use Floyd's cifar-10 dataset if present floyd_cifar10_location = '/input/cifar-10/python.tar.gz' if isfile(floyd_cifar10_location): tar_gz_path = floyd_cifar10_location else: tar_gz_path = 'cifar-10-python.tar.gz' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(tar_gz_path): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', tar_gz_path, pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open(tar_gz_path) as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path) """ Explanation: Image Classification In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images. Get the Data Run the following cell to download the CIFAR-10 dataset for python. End of explanation """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper import numpy as np # Explore the dataset batch_id = 2 sample_id = 985 helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id) import tensorflow as tf import sklearn from sklearn import preprocessing import numpy as np """ Explanation: Explore the Data The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following: * airplane * automobile * bird * cat * deer * dog * frog * horse * ship * truck Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch. Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions. End of explanation """ def normalize(x): """ Normalize a list of sample image data in the range of 0 to 1 : x: List of image data. The image shape is (32, 32, 3) : return: Numpy array of normalize data """ # TODO: Implement Function normalize = [] for image in x: normalized_values = (image - np.min(image)) / (np.max(image) - np.min(image)) normalize.append(normalized_values) return np.asarray(normalize) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_normalize(normalize) """ Explanation: Implement Preprocess Functions Normalize In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. 
The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x. End of explanation """ label_encode = preprocessing.LabelBinarizer() label_encode.fit(range(10)) def one_hot_encode(x): """ One hot encode a list of sample labels. Return a one-hot encoded vector for each label. : x: List of sample Labels : return: Numpy array of one-hot encoded labels """ # TODO: Implement Function encoded = label_encode.transform(x) return encoded """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_one_hot_encode(one_hot_encode) """ Explanation: One-hot encode Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function. Hint: Don't reinvent the wheel. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode) """ Explanation: Randomize Data As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset. Preprocess all the data and save it Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb')) """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ import tensorflow as tf def neural_net_image_input(image_shape): """ Return a Tensor for a batch of image input : image_shape: Shape of the images : return: Tensor for image input. """ # TODO: Implement Function batch_size = None image_input = tf.placeholder(tf.float32, shape=[batch_size, image_shape[0], image_shape[1], image_shape[2]], name='x') return image_input def neural_net_label_input(n_classes): """ Return a Tensor for a batch of label input : n_classes: Number of classes : return: Tensor for label input. """ # TODO: Implement Function label_input = tf.placeholder(tf.float32, shape=[None, n_classes], name='y') return label_input def neural_net_keep_prob_input(): """ Return a Tensor for keep probability : return: Tensor for keep probability. """ # TODO: Implement Function keep_probability = tf.placeholder(tf.float32, name='keep_prob') return keep_probability """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tf.reset_default_graph() tests.test_nn_image_inputs(neural_net_image_input) tests.test_nn_label_inputs(neural_net_label_input) tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input) """ Explanation: Build the network For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. 
To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project. Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup. However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d. Let's begin! Input The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions * Implement neural_net_image_input * Return a TF Placeholder * Set the shape using image_shape with batch size set to None. * Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_label_input * Return a TF Placeholder * Set the shape using n_classes with batch size set to None. * Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_keep_prob_input * Return a TF Placeholder for dropout keep probability. * Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder. These names will be used at the end of the project to load your saved model. Note: None for shapes in TensorFlow allow for a dynamic size. End of explanation """ def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides): """ Apply convolution then max pooling to x_tensor :param x_tensor: TensorFlow Tensor :param conv_num_outputs: Number of outputs for the convolutional layer :param conv_ksize: kernal size 2-D Tuple for the convolutional layer :param conv_strides: Stride 2-D Tuple for convolution :param pool_ksize: kernal size 2-D Tuple for pool :param pool_strides: Stride 2-D Tuple for pool : return: A tensor that represents convolution and max pooling of x_tensor """ # TODO: Implement Function print(x_tensor) weight = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], int(x_tensor.shape[3]), conv_num_outputs])) #int(x_tensor.shape[3]) #x_tensor.get_shape().as_list()[-1] print(weight) bias = tf.Variable(tf.random_normal([conv_num_outputs])) conv = tf.nn.conv2d(x_tensor, weight, strides= [1, conv_strides[0], conv_strides[1], 1], padding='SAME') add = tf.nn.bias_add(conv, bias) activation = tf.nn.relu(add) max_pooling = tf.nn.max_pool(activation, ksize=[1, pool_ksize[0], pool_ksize[1], 1], strides=[1, pool_strides[0], pool_strides[1], 1], padding='SAME') return max_pooling """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_con_pool(conv2d_maxpool) """ Explanation: Convolution and Max Pooling Layer Convolution layers have a lot of success with images. 
For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling: * Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor. * Apply a convolution to x_tensor using weight and conv_strides. * We recommend you use same padding, but you're welcome to use any padding. * Add bias * Add a nonlinear activation to the convolution. * Apply Max Pooling using pool_ksize and pool_strides. * We recommend you use same padding, but you're welcome to use any padding. Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers. End of explanation """ def flatten(x_tensor): """ Flatten x_tensor to (Batch Size, Flattened Image Size) : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions. : return: A tensor of size (Batch Size, Flattened Image Size). """ # TODO: Implement Function # batch_size = x_tensor.get_shape()[-1] batch_size = x_tensor.shape[0] print(x_tensor) width = x_tensor.shape[1] breadth = x_tensor.shape[2] height = x_tensor.shape[3] image_size = width * breadth * height print(image_size) # flattened_1 = tf.reshape(x_tensor, [batch_size , image_size]) flattened = tf.contrib.layers.flatten(x_tensor) print(flattened) return flattened """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_flatten(flatten) """ Explanation: Flatten Layer Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. End of explanation """ def fully_conn(x_tensor, num_outputs): """ Apply a fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function print(x_tensor) print(num_outputs) weights = tf.Variable(tf.random_normal((int(x_tensor.get_shape().as_list()[1]), num_outputs), stddev = 0.01)) bias = tf.Variable(tf.zeros(num_outputs)) fully_connected = tf.add(tf.matmul(x_tensor, weights), bias) fully_connected = tf.nn.relu(fully_connected) # fully_connected = tf.contrib.layers.fully_connected(inputs= x_tensor, num_outputs= num_outputs ) return fully_connected """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_fully_conn(fully_conn) """ Explanation: Fully-Connected Layer Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. End of explanation """ def output(x_tensor, num_outputs): """ Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. 
""" # TODO: Implement Function weights = tf.Variable(tf.random_normal((int(x_tensor.get_shape().as_list()[1]), num_outputs), stddev = 0.01)) bias = tf.Variable(tf.zeros(num_outputs)) dense = tf.add(tf.matmul(x_tensor, weights), bias) # dense = tf.layers.dense(inputs= x_tensor, units= num_outputs) return dense """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_output(output) """ Explanation: Output Layer Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. Note: Activation, softmax, or cross entropy should not be applied to this. End of explanation """ def conv_net(x, keep_prob): """ Create a convolutional neural network model : x: Placeholder tensor that holds image data. : keep_prob: Placeholder tensor that hold dropout keep probability. : return: Tensor that represents logits """ # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers # Play around with different number of outputs, kernel size and stride # Function Definition from Above: # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides) print(x) apply_1 = conv2d_maxpool(x, conv_num_outputs=64, conv_ksize= [2,2], conv_strides= [1,1], pool_ksize= [2,2], pool_strides= [1,1]) dropping_out = tf.layers.dropout(apply_1, rate= keep_prob) # TODO: Apply a Flatten Layer # Function Definition from Above: # flatten(x_tensor) apply_2 = flatten(dropping_out) # TODO: Apply 1, 2, or 3 Fully Connected Layers # Play around with different number of outputs # Function Definition from Above: # fully_conn(x_tensor, num_outputs) apply_3 = fully_conn(apply_2, num_outputs=128) dropping_out_2 = tf.layers.dropout(apply_3, rate= keep_prob) print(apply_3) # TODO: Apply an Output Layer # Set this to the number of classes # Function Definition from Above: # output(x_tensor, num_outputs) logits = output(dropping_out_2, 10) # TODO: return output return logits """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ ############################## ## Build the Neural Network ## ############################## # Remove previous weights, bias, inputs, etc.. tf.reset_default_graph() # Inputs x = neural_net_image_input((32, 32, 3)) y = neural_net_label_input(10) keep_prob = neural_net_keep_prob_input() # Model logits = conv_net(x, keep_prob) # Name logits Tensor, so that is can be loaded from disk after training logits = tf.identity(logits, name='logits') # Loss and Optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.AdamOptimizer().minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') tests.test_conv_net(conv_net) """ Explanation: Create Convolutional Model Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model: Apply 1, 2, or 3 Convolution and Max Pool layers Apply a Flatten Layer Apply 1, 2, or 3 Fully Connected Layers Apply an Output Layer Return the output Apply TensorFlow's Dropout to one or more layers in the model using keep_prob. 
End of explanation """ def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch): """ Optimize the session on a batch of images and labels : session: Current TensorFlow session : optimizer: TensorFlow optimizer function : keep_probability: keep probability : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data """ # TODO: Implement Function # images, label = feature_batch, label_batch # session.run(images, label) session.run(optimizer, feed_dict= {x: feature_batch, y: label_batch, keep_prob : keep_probability}) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_train_nn(train_neural_network) """ Explanation: Train the Neural Network Single Optimization Implement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following: * x for image input * y for labels * keep_prob for keep probability for dropout This function will be called for each batch, so tf.global_variables_initializer() has already been called. Note: Nothing needs to be returned. This function is only optimizing the neural network. End of explanation """ def print_stats(session, feature_batch, label_batch, cost, accuracy): """ Print information about loss and validation accuracy : session: Current TensorFlow session : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data : cost: TensorFlow cost function : accuracy: TensorFlow accuracy function """ # TODO: Implement Function loss = session.run(cost, feed_dict={ x: feature_batch, y:label_batch, keep_prob: 1.0}) validation_accuracy = session.run(accuracy, feed_dict={x: valid_features, y:valid_labels, keep_prob: 1.0}) print("cost: {}, accuracy: {}".format(loss, validation_accuracy)) """ Explanation: Show Stats Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy. End of explanation """ # TODO: Tune Parameters epochs = 100 batch_size = 64 keep_probability = 0.8 """ Explanation: Hyperparameters Tune the following parameters: * Set epochs to the number of iterations until the network stops learning or start overfitting * Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory: * 64 * 128 * 256 * ... * Set keep_probability to the probability of keeping a node using dropout End of explanation """ import time """ DON'T MODIFY ANYTHING IN THIS CELL """ print('Checking the Training on a Single Batch...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) start = time.time() # Training cycle for epoch in range(epochs): batch_i = 1 for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) elapsed_time = float(time.time() - start) # epoch_second = epoch / elapsed_time if elapsed_time > 0 else 0 print('speed (in seconds) = {}'.format(elapsed_time)) """ Explanation: Train on a Single CIFAR-10 Batch Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. 
This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ save_model_path = './image_classification' print('Training...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): # Loop over all batches n_batches = 5 for batch_i in range(1, n_batches + 1): for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # Save Model saver = tf.train.Saver() save_path = saver.save(sess, save_model_path) """ Explanation: Fully Train the Model Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import tensorflow as tf import pickle import helper import random # Set batch size if not already set try: if batch_size: pass except NameError: batch_size = 64 save_model_path = './image_classification' n_samples = 4 top_n_predictions = 3 def test_model(): """ Test the saved model against the test dataset """ test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb')) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load model loader = tf.train.import_meta_graph(save_model_path + '.meta') loader.restore(sess, save_model_path) # Get Tensors from loaded model loaded_x = loaded_graph.get_tensor_by_name('x:0') loaded_y = loaded_graph.get_tensor_by_name('y:0') loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') loaded_logits = loaded_graph.get_tensor_by_name('logits:0') loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0') # Get accuracy in batches for memory limitations test_batch_acc_total = 0 test_batch_count = 0 for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size): test_batch_acc_total += sess.run( loaded_acc, feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0}) test_batch_count += 1 print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count)) # Print Random Samples random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples))) random_test_predictions = sess.run( tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions), feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0}) helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions) test_model() """ Explanation: Checkpoint The model has been saved to disk. Test Model Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters. End of explanation """
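"""
Explanation: Added illustrative sketch, not part of the original project: a minimal helper for classifying one new 32x32x3 image with the model saved above. It reuses the notebook's own pieces (the normalize function, the save_model_path string, and the tensor names x, keep_prob and logits), all of which are assumed to still be defined in the session; the name predict_single_image is invented for this example.
End of explanation
"""

import numpy as np
import tensorflow as tf

def predict_single_image(image, model_path=save_model_path):
    # image: numpy array of shape (32, 32, 3) with raw pixel values.
    batch = normalize([image])  # reuse the notebook's per-image min-max scaling
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        # Same load pattern as test_model above.
        loader = tf.train.import_meta_graph(model_path + '.meta')
        loader.restore(sess, model_path)
        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        probs = sess.run(tf.nn.softmax(loaded_logits),
                         feed_dict={loaded_x: batch, loaded_keep_prob: 1.0})
    return int(np.argmax(probs, axis=1)[0])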
joekasp/ionic_liquids
ionic_liquids/examples/Example_Workflow.ipynb
mit
import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from rdkit import Chem from rdkit.Chem import AllChem, Descriptors from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator as Calculator import pandas as pd import numpy as np from sklearn.model_selection import GridSearchCV from sklearn.linear_model import Lasso from sklearn.svm import SVR from sklearn.neural_network import MLPRegressor from numpy import linalg as LA """ Explanation: Example of the Workflow This is an example of main.py in the ionic_liquids folder. This notebook is based upon the functions in utils.py and methods/methods.py. I have written this code in such a way that the function will come first and the calling of the function comes right after it. I will first have to import the libraries that are necessary to run this program, including train_test_split that allows for splitting datasets into training sets and test sets necessary to run machine learning. End of explanation """ FILENAME = 'inputdata.xlsx' MODEL = 'mlp_regressor' DIRNAME = 'my_test' """ Explanation: For this example, I will utilize the following filename, machine learning model, and directory name to save the model. End of explanation """ def read_data(filename): """ Reads data in from given file to Pandas DataFrame Inputs ------- filename : string of path to file Returns ------ df : Pandas DataFrame """ cols = filename.split('.') name = cols[0] filetype = cols[1] if (filetype == 'csv'): df = pd.read_csv(filename) elif (filetype in ['xls', 'xlsx']): df = pd.read_excel(filename) else: raise ValueError('Filetype not supported') # clean the data if necessary df['EC_value'], df['EC_error'] = zip(*df['ELE_COD'].map(lambda x: x.split('±'))) y_error = np.copy(df['EC_error']) df = df.drop('EC_error', 1) df = df.drop('ELE_COD', 1) return df, y_error df, y_error = read_data(FILENAME) """ Explanation: The following step prepares the data to be read in the machine_learning methods. First, we need to get the data into a readable form and parse, if necessary. In our case, we need to parse the values and errors in the last column of the FILENAME. 
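As a small added illustration (the numeric value is invented), an entry such as '0.062±0.004' is split on the '±' character, so value, error = '0.062±0.004'.split('±') leaves the measurement in value and its uncertainty in error; this is exactly what the ELE_COD column mapping inside read_data does row by row.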
End of explanation """ def molecular_descriptors(data): """ Use RDKit to prepare the molecular descriptor Inputs ------ data: dataframe, cleaned csv data Returns ------ prenorm_X: dataframe, normalized input features Y: dataframe, experimental electrical conductivity """ n = data.shape[0] # Choose which molecular descriptor we want list_of_descriptors = ['NumHeteroatoms', 'ExactMolWt', 'NOCount', 'NumHDonors', 'RingCount', 'NumAromaticRings', 'NumSaturatedRings', 'NumAliphaticRings'] # Get the molecular descriptors and their dimension calc = Calculator(list_of_descriptors) D = len(list_of_descriptors) d = len(list_of_descriptors)*2 + 4 #Setting up the X and Y matrices X = np.zeros((n, d)) Y = data['EC_value'] X[:, -3] = data['T'] X[:, -2] = data['P'] X[:, -1] = data['MOLFRC_A'] for i in range(n): A = Chem.MolFromSmiles(data['A'][i]) B = Chem.MolFromSmiles(data['B'][i]) X[i][:D] = calc.CalcDescriptors(A) X[i][D:2*D] = calc.CalcDescriptors(B) prenorm_X = pd.DataFrame(X,columns=['NUM', 'NumHeteroatoms_A', 'MolWt_A', 'NOCount_A','NumHDonors_A', 'RingCount_A', 'NumAromaticRings_A', 'NumSaturatedRings_A', 'NumAliphaticRings_A', 'NumHeteroatoms_B', 'MolWt_B', 'NOCount_B', 'NumHDonors_B', 'RingCount_B', 'NumAromaticRings_B', 'NumSaturatedRings_B', 'NumAliphaticRings_B', 'T', 'P', 'MOLFRC_A']) prenorm_X = prenorm_X.drop('NumAliphaticRings_A', 1) prenorm_X = prenorm_X.drop('NumAliphaticRings_B', 1) return prenorm_X,Y X, y = molecular_descriptors(df) """ Explanation: We will send our data to the molecular descriptor function that will create the molecular descriptors for us, resulting in the X matrix and y vector. Specifically, the X matrix will hold all of our inputs for the machine learning whereas y vector will be the actual electronic conductivity values. End of explanation """ Y = np.empty((y.shape[0],2)) Y[:,0] = y.ravel() Y[:,1] = y_error.ravel() X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.10) y_train = Y_train[:,0] y_test = Y_test[:,0] e_train = Y_train[:,1] e_test = Y_test[:,1] """ Explanation: We can prepare our testing and training data set for the machine learning calling using train_test_split, a function called from sklearn module of python. End of explanation """ def normalization(data,means=None,stdevs=None): """ Normalizes the data using the means and standard deviations given, calculating them otherwise. Returns the means and standard deviations of columns. Inputs ------ data : Pandas DataFrame means : optional numpy argument of column means stdevs : optional numpy argument of column st. devs Returns ------ normed : the normalized DataFrame means : the numpy row vector of column means stdevs : the numpy row vector of column st. devs """ cols = data.columns data = data.values if (means is None) or (stdevs is None): means = np.mean(data, axis=0) stdevs = np.std(data, axis=0, ddof=1) else: means = np.array(means) stdevs = np.array(stdevs) # handle special case of one row if (len(data.shape) == 1) or (data.shape[0] == 1): for i in range(len(data)): data[i] = (data[i] - means[i]) / stdevs[i] else: for i in range(data.shape[1]): data[:,i] = (data[:,i] - means[i]*np.ones(data.shape[0])) / stdevs[i] normed = pd.DataFrame(data, columns=cols) return normed, means, stdevs X_train, X_mean, X_std = normalization(X_train) X_test, trash, trash = normalization(X_test,means=X_mean,stdevs=X_std) """ Explanation: Followingly, the program will normalize the testing data using the training data set. This will also provide us with the mean value and standard deviation of X. 
End of explanation """
def do_svr(X,Y):
    """
    Call the Support Vector Regressor,
    Fit the weight on the training set

    Input
    ------
    X: dataframe, n*m, n is number of data points, m is number of features
    y: experimental electrical conductivity

    Returns
    ------
    svr: objective, the regressor objective

    """
    Y = Y.ravel()
    svr = SVR(kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=0.001,
              C=1.0, epsilon=0.01, shrinking=True, cache_size=200,
              verbose=False, max_iter=-1)
    grid_search = GridSearchCV(svr, cv=5,
                               param_grid={"C": [1e0, 1e1, 1e2, 1e3],
                                           "gamma": np.logspace(-2, 2, 5)})
    grid_search.fit(X,Y)
    # set the hyper-parameters found by the grid search before the final fit
    svr.C = grid_search.best_params_['C']
    svr.gamma = grid_search.best_params_['gamma']
    svr.fit(X,Y)
    return svr

def do_MLP_regressor(X,Y):
    """
    Call the MLP Regressor,
    Fit the weight on the training set

    Input
    ------
    X: dataframe, n*m, n is number of data points, m is number of features
    y: experimental electrical conductivity

    Returns
    ------
    mlp_regr : the MLP object with the best parameters

    """
    Y = Y.ravel()
    alphas = np.array([0.1,0.01,0.001,0.0001])
    mlp_regr = MLPRegressor(hidden_layer_sizes=(100,), activation='tanh', solver='sgd',
                            alpha=0.0001, max_iter=5000, random_state=None,
                            learning_rate_init=0.01)
    grid_search = GridSearchCV(mlp_regr, param_grid=dict(alpha=alphas))
    grid_search.fit(X,Y)
    #print(grid_search.best_params_)
    # refit with the regularization strength chosen by the grid search
    mlp_regr.alpha = grid_search.best_params_['alpha']
    mlp_regr.fit(X,Y)
    return mlp_regr

def do_lasso(X,Y):
    """
    Runs a lasso grid search on the input data

    Inputs
    ------
    X: dataframe, n*m, n is number of data points, m is number of features
    y: experimental electrical conductivity

    Returns
    ------
    lasso : sklearn object with the model information

    """
    Y = Y.ravel()
    alphas = np.array([0.1,0.01,0.001,0.0001])
    lasso = Lasso(alpha=0.001, fit_intercept=True, normalize=False, precompute=False,
                  copy_X=True, max_iter=10000, tol=0.001, positive=False,
                  random_state=None, selection='cyclic')
    gs = GridSearchCV(lasso, param_grid=dict(alpha=alphas))
    gs.fit(X,Y)
    # refit with the regularization strength chosen by the grid search
    lasso.alpha = gs.best_params_['alpha']
    lasso.fit(X,Y)
    return lasso

""" Explanation: We coded three models into our program: MLP_regressor, LASSO, and SVR. Each of these models is well documented in sklearn, a Python library. In the actual program you can use any of the three models, but for the purpose of this example we chose mlp_regressor. The ValueError is raised only if you do not specify one of the three models; a good example is if you were to change MODEL to 'MLP_classifier'. End of explanation """
if (MODEL.lower() == 'mlp_regressor'):
    obj = do_MLP_regressor(X_train, y_train.ravel())
elif (MODEL.lower() == 'lasso'):
    obj = do_lasso(X_train, y_train.ravel())
elif (MODEL.lower() == 'svr'):
    obj = do_svr(X_train, y_train.ravel())
else:
    raise ValueError("Model not supported")

""" Explanation: The code below calls the functions defined above and is the machine learning portion of the package. End of explanation """
FIG_SIZE = (4,4)

def parity_plot(Y_act, Y_pred):
    """
    Creates a parity plot

    Input
    -----
    y_pred : predicted values from the model (Lasso, SVR, or MLP)
    y_act : 'true' (actual) values

    Output
    ------
    fig : matplotlib figure

    """
    fig = plt.figure(figsize=FIG_SIZE)
    plt.scatter(list(Y_act), list(Y_pred))
    plt.plot([Y_act.min(), Y_act.max()], [Y_act.min(), Y_act.max()], lw=4, color='r')
    plt.xlabel('Experimental')
    plt.ylabel('Predicted')
    return fig

my_plot = parity_plot(y_train, obj.predict(X_train))
plt.show(my_plot)

""" Explanation: Lastly, the experimental values are scatter plotted against the predicted values; we use the parity_plot function to do so. The
plt.show() function will just allow the plot to show up. End of explanation """ def error_values(X_train,X_test,Y_train,Y_test): """ Creates the two predicted values Input ----- X_train : numpy array, the 10% of the training set data values X_test : numpy array, the molecular descriptors for the testing set data values Y_train: numpy array, the 10% of the training set of electronic conductivity values Y_test: numpy array, 'true' (actual) electronic conductivity values Output ------ yh : the prediction output for training data set yh2 : the prediction output for the testing data set """ #setting up parameters and variables for plotting n_train = X_train.shape[0] n_test = X_test.shape[0] d = X_train.shape[1] hdnode = 100 w1 = np.random.normal(0,0.001,d*hdnode).reshape((d,hdnode)) d1 = np.zeros((d,hdnode)) w2 = np.random.normal(0,0.001,hdnode).reshape((hdnode,1)) d2 = np.zeros(hdnode) h = np.zeros(hdnode) mb = 100 #minibatch size m = int(n_train/mb) batch = np.arange(m) lr = 0.00020 EP = 20000 #needed for initializing ep = 0 y = np.zeros((mb,1)) yh = np.zeros((n_train,1)) yh2 = np.zeros((n_test,1)) L_train= np.zeros(EP+1) L_test = np.zeros(EP+1) Y_train = Y_train.reshape(len(Y_train),1) #activation function for the hidden layer is tanh def g(A): return (np.tanh(A)) def gd(A): return (1-np.square(np.tanh(A))) #setting up how long the epoch will run EP = 20 ep = 0 while ep < EP: ep += 1 yh = g(X_train.dot(w1)).dot(w2) yh2 = g(X_test.dot(w1)).dot(w2) print(yh.dtype,yh2.dtype,Y_train.dtype,Y_test.dtype) L_train[ep] = LA.norm(yh-Y_train.reshape(len(Y_train),1))/n_train L_test[ep] = LA.norm(yh2-Y_test.reshape(len(Y_test),1))/n_test np.random.shuffle(batch) for i in range(m): st = batch[i]*mb ed = (batch[i]+1)*mb h = g(X_train[st:ed].dot(w1)) y = h.dot(w2) d2 = h.T.dot(Y_train[st:ed]-y) print (d2.shape) print(h.shape, h.dtype, X_train.shape, X_train.dtype, y.shape, y.dtype) print((X_train[st:ed]).shape,(Y_train[st:ed]-y).shape) d1 = X_train[st:ed].T.dot(np.multiply((Y_train[st:ed]-y).dot(w2.T),gd(X_train[st:ed].dot(w1)))) w2 += lr*d2 w1 += lr*d1 return yh, yh2 yh, yh2 = error_values(np.copy(X_train),np.copy(X_test),np.copy(y_train),np.copy(y_test)) #Creates a plot for the training data set predicted and experimental values plt.figure(figsize=(10,8)) plt.subplot(1,2,1) plt.scatter(y_train,yh,s=0.5,color='blue') plt.title('Prediction on training data') plt.plot(np.linspace(0,12,1000),np.linspace(0,12,1000),color='black') plt.xlim((0,12)) plt.ylim((0,12)) plt.xlabel("Experiment($S*m^2/mol$)") plt.ylabel("Prediction($S*m^2/mol$)") #Creates a plot for the testing data set predicted and experimental values plt.subplot(1,2,2) plt.scatter(y_test,yh2,s=2,color='blue') plt.title('Prediction on test data') plt.xlim((0,12)) plt.ylim((0,12)) plt.xlabel("Experiment($S*m^2/mol$)") plt.ylabel("Prediction($S*m^2/mol$)") plt.plot(np.linspace(0,12,1000),np.linspace(0,12,1000),color='black') plt.show() """ Explanation: The error_values function calculates and provides the prediction values to create the plot of prediction values versus the experimental values. 
End of explanation """ fig = plt.figure(figsize=(10,8)) ax = fig.add_subplot(111) ax1 = fig.add_subplot(211) ax2 = fig.add_subplot(212) #Set up the training data set plot result = pd.DataFrame(columns=['Experiment','Prediction','error']) result.Experiment = y_train result.Prediction = yh result.error = e_train result = result.sort_values(['Experiment','Prediction'],ascending=[1,1]) size=0.2 ax1.set_xlim((0,2300)) ax1.set_ylim((-1,13)) ax1.scatter(np.arange(X_train.shape[0]),result.Experiment,color="blue",s=size,label='Experiment') ax1.scatter(np.arange(X_train.shape[0]),result.Prediction,color="red",s=size,label='Prediction') ax1.scatter(np.arange(X_train.shape[0]),result.Experiment+result.error,color="green",s=size,label='Experimental Error') ax1.scatter(np.arange(X_train.shape[0]),result.Experiment-result.error,color="green",s=size) ax1.set_title('Prediction on Training data') ax1.legend(loc='upper left') #setting up the test data set plot result = pd.DataFrame(columns=['Experiment','Prediction','error']) result.Experiment = y_test result.Prediction = yh2 result.error = e_test result = result.sort_values(['Experiment','Prediction'],ascending=[1,1]) size=2 ax2.set_xlim((0,260)) ax2.set_ylim((-1,13)) ax2.scatter(np.arange(X_test.shape[0]),result.Experiment,color="blue",s=size,label='Experiment') ax2.scatter(np.arange(X_test.shape[0]),result.Prediction,color="red",s=size,label='Prediction') ax2.scatter(np.arange(X_test.shape[0]),result.Experiment+result.error,color="green",s=size,label='Experimental Error') ax2.scatter(np.arange(X_test.shape[0]),result.Experiment-result.error,color="green",s=size) ax2.set_title('Prediction on test data') ax2.legend(loc='upper left') ax.set_xlabel('Data points') ax.set_ylabel('Conductivity($S*m^2/mol$)') ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('none') ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off') fig.tight_layout() plt.show() """ Explanation: The following input will provides a plot that will compare the experimental and predicted data to the experimental error provided by the ILThermo database. The x and y axis limits are based upon the percent that you pick in the test_training_split function. End of explanation """
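# As a quick numerical complement to the plots above, we can summarize how far the
# predictions are from the experimental values. This is only a minimal sketch: it
# assumes the arrays yh, yh2, y_train and y_test computed earlier in this notebook
# are still available, and it uses plain numpy rather than any project-specific helper.
def rmse(y_true, y_pred):
    """Root-mean-square error between two 1-D arrays."""
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()
    return np.sqrt(np.mean((y_true - y_pred) ** 2))

print("Training RMSE:", rmse(y_train, yh))
print("Test RMSE:", rmse(y_test, yh2))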
ananswam/bioscrape
inference examples/Gaussian prior example.ipynb
mit
%matplotlib inline
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
""" Explanation: Parameter identification example Here is a simple toy model that we use to demonstrate how the inference package works: $\emptyset \xrightarrow[]{k_1} X \; \; \; \; X \xrightarrow[]{d_1} \emptyset$ We run the MCMC algorithm to identify the parameters from the experimental data. In this demonstration, we try to use multiple trajectories of data taken under multiple initial conditions and with different numbers of time points. End of explanation """
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.inference import py_inference
import numpy as np
import pylab as plt
import pandas as pd

# Import a bioscrape/SBML model
M = Model(sbml_filename = 'toy_sbml_model.xml')
# Import data from CSV
# Import a CSV file for each experiment run
df = pd.read_csv('test_data.csv', delimiter = '\t', names = ['X','time'], skiprows = 1)
M.set_species({'X':df['X'][0]})
# Create prior for parameters
prior = {'d1' : ['gaussian', 0.2, 200]}
sampler, pid = py_inference(Model = M, exp_data = df, measurements = ['X'], time_column = ['time'],
                            nwalkers = 5, init_seed = 0.15, nsteps = 1500, sim_type = 'deterministic',
                            params_to_estimate = ['d1'], prior = prior)
""" Explanation: Using a Gaussian prior for d1 End of explanation """
%matplotlib inline
import bioscrape as bs
from bioscrape.types import Model
from bioscrape.inference import py_inference
import numpy as np
import pylab as plt
import pandas as pd

# Import a bioscrape/SBML model
M = Model(sbml_filename = 'toy_sbml_model.xml')
# Import data from CSV
# Import a CSV file for each experiment run
df = pd.read_csv('test_data.csv', delimiter = '\t', names = ['X','time'], skiprows = 1)
M.set_species({'X':df['X'][0]})
prior = {'d1' : ['uniform', 0, 10], 'k1' : ['uniform', 0, 100]}
sampler, pid = py_inference(Model = M, exp_data = df, measurements = ['X'], time_column = ['time'],
                            nwalkers = 20, init_seed = 0.15, nsteps = 5500, sim_type = 'deterministic',
                            params_to_estimate = ['d1', 'k1'], prior = prior)
""" Explanation: Using uniform priors, estimating both k1 and d1, and using the pid (parameter inference) object directly. End of explanation """
from bioscrape.simulator import py_simulate_model
M_fit = Model(sbml_filename = 'toy_sbml_model.xml')
M_fit.set_species({'X':df['X'][0]})
timepoints = pid.timepoints
flat_samples = sampler.get_chain(discard=200, thin=15, flat=True)
inds = np.random.randint(len(flat_samples), size=200)
for ind in inds:
    sample = flat_samples[ind]
    for pi, pi_val in zip(pid.params_to_estimate, sample):
        M_fit.set_parameter(pi, pi_val)
    plt.plot(timepoints, py_simulate_model(timepoints, Model= M_fit)['X'], "C1", alpha=0.1)
# plt.errorbar(, y, yerr=yerr, fmt=".k", capsize=0)
# plt.plot(timepoints, list(pid.exp_data['X']), label = 'data')
plt.plot(timepoints, py_simulate_model(timepoints, Model = M)['X'], "k", label="original model")
plt.legend(fontsize=14)
plt.xlabel("Time")
plt.ylabel("[X]");

flat_samples = sampler.get_chain(discard = 200, thin = 15,flat = True)
flat_samples
""" Explanation: Check mcmc_results.csv for the results of the MCMC procedure and perform your own analysis. You can also plot the results as follows. End of explanation """
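# A small follow-up sketch for inspecting the posterior samples directly. It assumes
# that flat_samples (from sampler.get_chain above) is a 2-D array with one column per
# estimated parameter and that pid.params_to_estimate holds the matching names; both
# come from the cells above, not from any additional bioscrape API.
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, flat_samples.shape[1], figsize=(10, 3))
axes = np.atleast_1d(axes)
for i, ax in enumerate(axes):
    ax.hist(flat_samples[:, i], bins=40, color="C0", alpha=0.7)
    ax.set_xlabel(pid.params_to_estimate[i])
axes[0].set_ylabel("sample count")
plt.tight_layout()
plt.show()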
manparvesh/manparvesh.github.io
oldsitejekyll/markdown_generator/publications.ipynb
mit
!cat publications.tsv """ Explanation: Publications markdown generator for academicpages Takes a TSV of publications with metadata and converts them for use with academicpages.github.io. This is an interactive Jupyter notebook (see more info here). The core python code is also in publications.py. Run either from the markdown_generator folder after replacing publications.tsv with one containing your data. TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style. Data format The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. excerpt and paper_url can be blank, but the others must have values. pub_date must be formatted as YYYY-MM-DD. url_slug will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be YYYY-MM-DD-[url_slug].md and the permalink will be https://[yourdomain]/publications/YYYY-MM-DD-[url_slug] This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create). End of explanation """ import pandas as pd """ Explanation: Import pandas We are using the very handy pandas library for dataframes. End of explanation """ publications = pd.read_csv("publications.tsv", sep="\t", header=0) publications """ Explanation: Import TSV Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or \t. I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. End of explanation """ html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) """ Explanation: Escape special characters YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. End of explanation """ import os for row, item in publications.iterrows(): md_filename = str(item.pub_date) + "-" + item.url_slug + ".md" html_filename = str(item.pub_date) + "-" + item.url_slug year = item.pub_date[:4] ## YAML variables md = "---\ntitle: \"" + item.title + '"\n' md += """collection: publications""" md += """\npermalink: /publication/""" + html_filename if len(str(item.excerpt)) > 5: md += "\nexcerpt: '" + html_escape(item.excerpt) + "'" md += "\ndate: " + str(item.pub_date) md += "\nvenue: '" + html_escape(item.venue) + "'" if len(str(item.paper_url)) > 5: md += "\npaperurl: '" + item.paper_url + "'" md += "\ncitation: '" + html_escape(item.citation) + "'" md += "\n---" ## Markdown description for individual page if len(str(item.excerpt)) > 5: md += "\n" + html_escape(item.excerpt) + "\n" if len(str(item.paper_url)) > 5: md += "\n[Download paper here](" + item.paper_url + ")\n" md += "\nRecommended citation: " + item.citation md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md) """ Explanation: Creating the markdown files This is where the heavy lifting is done. 
This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (md) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. End of explanation """ !ls ../_publications/ !cat ../_publications/2009-10-01-paper-title-number-1.md """ Explanation: These files are in the publications directory, one directory below where we're working from. End of explanation """
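# If the TSV is edited by hand it is easy to lose a column, so a small sanity check
# before (re)generating the markdown files can save confusion. This is just an
# illustrative sketch; the required column names are the ones described in the data
# format section at the top of this notebook.
required_columns = ["pub_date", "title", "venue", "citation", "url_slug", "paper_url", "excerpt"]
missing = [c for c in required_columns if c not in publications.columns]
if missing:
    print("Missing columns in publications.tsv:", missing)
else:
    print("All required columns are present.")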
NREL/bifacial_radiance
docs/tutorials/19 - Example Simulation - East West Sheds.ipynb
bsd-3-clause
import os import numpy as np import pandas as pd from pathlib import Path import bifacial_radiance bifacial_radiance.__version__ testfolder = testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'Tutorial_01') if not os.path.exists(testfolder): os.makedirs(testfolder) demo = bifacial_radiance.RadianceObj("tutorial_19", path = testfolder) # Create a RadianceObj 'object' demo.setGround(0.62) epwfile = demo.getEPW(lat = 37.5, lon = -77.6) metdata = demo.readWeatherFile(epwfile, coerce_year=2001) timestamp = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -5')) demo.gendaylit(timestamp) """ Explanation: 19 - Example Simluation: East West Sheds This simulates a particular case where you have alternating rows facing east and west, in "E-W sheds". To simulate this, we will use the bases learned in Journal 7 of using multipe scene objects. One scene object will be all the "East facing modules", while the West facing modules will be the second scene object. We have to know some geometry to offset the modules, and that is calculated below: End of explanation """ # For sanity check, we are creating the same module but with different names for each orientation. numpanels=4 ygap = 0.02 # m Spacing between modules on each shed. y=1 # m. module size, one side x=1.7 # m. module size, other side. for landscape, x > y mymoduleEast = demo.makeModule(name='test-module_East',y=y,x=x, numpanels=numpanels, ygap=ygap) mymoduleWest = demo.makeModule(name='test-module_West',y=y,x=x, numpanels=numpanels, ygap=ygap) """ Explanation: Define your shed characteristics. In this case it is a 4-up landscape setup: End of explanation """ tilt = 30 gap_between_EW_sheds = 1 # m gap_between_shed_rows = 2 #m CW = mymoduleEast.sceney ground_underneat_shed = CW * np.cos(np.radians(tilt)) pitch = ground_underneat_shed*2 + gap_between_EW_sheds + gap_between_shed_rows offset_westshed = -(ground_underneat_shed+gap_between_EW_sheds) """ Explanation: Calculate the spacings so we can offset the West Facing modules properly: End of explanation """ clearance_height = 1.2 # m nMods = 21 nRows = 7 """ Explanation: Define the other characteristics of our array: End of explanation """ sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':90, 'nMods': nMods, 'nRows': nRows, 'appendRadfile':True} sceneObj1 = demo.makeScene(mymoduleEast, sceneDict) sceneDict2 = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':270, 'nMods': nMods, 'nRows': nRows, 'originx': offset_westshed, 'originy': 0, 'appendRadfile':True} sceneObj2 = demo.makeScene(mymoduleWest, sceneDict2) """ Explanation: Create the Scene Objects and the Scene: End of explanation """ octfile = demo.makeOct(demo.getfilelist()) """ Explanation: Finally get all the files together by creating the Octfile: End of explanation """ #!rvu -vf views\front.vp -e .01 -pe 0.3 -vp 1 -45 40 -vd 0 0.7 -0.7 MultipleObj.oct """ Explanation: View the Geometry You can check the geometry on rvu with the following commands. You can run it in jupyter/Python if you comment the line, but the program will not continue processing until you close the rvu window. ( if running rvu directly on the console, navigate to the folder where you have the simulation, and don't use the exclamation point at the beginning) Top view: End of explanation """ # !rvu -vf views\front.vp -e .01 -pe 0.3 -vp -4 -29 3.5 -vd 0 1 0 MultipleObj.oct """ Explanation: another view, close up: End of explanation """ sensorsy=4 # 1 per module. 
consider increasing the number but be careful with sensors in the space between modules. analysis = bifacial_radiance.AnalysisObj(octfile, demo.basename) frontscan, backscan = analysis.moduleAnalysis(sceneObj1, sensorsy=sensorsy) frontdict, backdict = analysis.analysis(octfile, "EastFacingShed", frontscan, backscan) # compare the back vs front irradiance frontscan, backscan = analysis.moduleAnalysis(sceneObj2, sensorsy=sensorsy ) frontdict2, backdict2 = analysis.analysis(octfile, "WestFacingShed", frontscan, backscan) # compare the back vs front irradiance """ Explanation: Analysis: We have to analyze the East and the West shed independently. End of explanation """
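# A rough way to summarize the two analyses above is the ratio of rear to front
# irradiance for each shed. This is only a sketch: it assumes the dictionaries
# returned by analysis.analysis() expose the scanned irradiance values under a
# 'Wm2' key (this may differ between bifacial_radiance versions, so check the
# keys of frontdict/backdict in your installation first).
import numpy as np

for label, front, back in [("East", frontdict, backdict), ("West", frontdict2, backdict2)]:
    front_mean = np.mean(front['Wm2'])
    back_mean = np.mean(back['Wm2'])
    print(f"{label}-facing shed: mean rear/front irradiance ratio = {back_mean/front_mean:.3f}")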
zhouqifanbdh/liupengyuan.github.io
201621198175.ipynb
mit
name = input("请输入您的姓名:")
date = float(input("请输入您出生的月份.日期:"))
if 3.21 <= date <= 4.19:
    print(name,",你是非常有性格的白羊座!")
elif 4.20 <= date <= 5.20:
    print(name,",你是非常有性格的金牛座!")
elif 5.21 <= date <= 6.21:
    print(name,",你是非常有性格的双子座!")
elif 6.22 <= date <= 7.22:
    print(name,",你是非常有性格的巨蟹座!")
elif 7.23 <= date <= 8.22:
    print(name,",你是非常有性格的狮子座!")
elif 8.23 <= date <= 9.22:
    print(name,",你是非常有性格的处女座!")
elif 9.23 <= date <= 10.23:
    print(name,",你是非常有性格的天秤座!")
elif 10.24 <= date <= 11.22:
    print(name,",你是非常有性格的天蝎座!")
elif 11.23 <= date <= 12.21:
    print(name,",你是非常有性格的射手座!")
elif 1.20 <= date <= 2.18:
    print(name,",你是非常有性格的水瓶座!")
elif 2.19 <= date <= 3.20:
    print(name,",你是非常有性格的双鱼座!")
else:
    print(name,",你是非常有性格的摩羯座!")
""" Explanation: Exercise 1: Write a program that reads the user's name from the keyboard (e.g. Mr. right), lets the user enter their birth month and day, and determines the user's zodiac sign. If, say, the user is a Taurus, the output should be: Mr. right, you are a Taurus with a lot of personality! End of explanation """
m = int(input("请输入一个整数:"))
n = int(input("请输入一个整数:"))
temp = ''
total = 1
x = input("请问您想做什么运算?加和运算请按'+',乘积运算请按'*',求余运算请按'%',其他运算请按'//',回车结束。")
if x == '+':
    if m > n:
        temp = m
        m = n
        n = temp
    else:
        print((m+n)*(n-m+1)/2)
elif x == '*':
    total = m*n
    if m > n:
        temp = m
        m = n
        n = temp
    else:
        while (m+1) <= (n-1):
            total = total*(m+1)*(n-1)
            m += 1
            n -= 1
        print(total)
elif x == '%':
    print(m%n)
else:
    print(m//n)
""" Explanation: Exercise 2: Write a program that reads two integers m and n (n is not 0) from the keyboard and asks the user what they want to do. If they want a sum, compute and output the sum from m to n; if a product, compute and output the product from m to n; if a remainder, compute and output the remainder of m divided by n; otherwise compute and output the integer division of m by n. End of explanation """
x = float(input("请输入PM2.5的数值:"))
if x > 500:
    print("您应该打开空气净化器,带防雾霾口罩!")
else:
    print("空气质量还不错,适宜出行!")
""" Explanation: Exercise 3: Write a program that gives protective advice based on the Beijing smog PM2.5 reading. For example, when the PM2.5 value is greater than 500, it should suggest turning on an air purifier, wearing an anti-smog mask, and so on. End of explanation """
sp = input("请输入一个英文动词的单数形式,回车结束:")
if sp.endswith("ch"):
    print(sp+"es")
else:
    print(sp+"s")
""" Explanation: Exercise 4: Convert an English word from singular to plural. Given an English verb (in singular form) as input, produce its plural form, or give a suggestion for the singular-to-plural conversion (hint: the some_string.endswith(some_letter) function can check the character(s) a string ends with). End of explanation """
x = input("请输入一行数字:")
if len(x) <= 100:
    print("\n")
else:
    print(x)
""" Explanation: Practice exercise: Write a program that can display a blank line on the screen. End of explanation """
num = int(input("请输入整数的个数,回车结束:"))
if num < 2:
    print("输入个数不能少于2,请您重新输入:")
else:
    m = int(input("请输入一个整数,回车结束:"))
    n = int(input("请再输入一个整数,回车结束:"))
    if m >= n:
        max_number = m
        mid_number = n
    else:
        max_number = n
        mid_number = m
    i = 2
    while i < num:
        x = int(input("请再输入一个整数,回车结束:"))
        if mid_number < x < max_number:
            mid_number = x
        if x > max_number:
            mid_number = max_number
            max_number = x
        i += 1
    print(mid_number)
""" Explanation: Challenge exercise: Write a program where the user enters some integers and the program finds and outputs the second largest of them. End of explanation """
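# An alternative sketch for the challenge exercise: collect all the numbers in a
# list first and then sort it, which avoids tracking the largest and second-largest
# values by hand. This is only one possible solution, shown to contrast with the
# loop-based answer above.
num = int(input("How many integers? "))
values = []
for _ in range(num):
    values.append(int(input("Enter an integer: ")))
if len(values) >= 2:
    second_largest = sorted(values)[-2]
    print(second_largest)
else:
    print("Need at least 2 integers.")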
intel-analytics/analytics-zoo
docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Explanation: <a href="https://colab.research.google.com/github/intel-analytics/analytics-zoo/blob/master/docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Copyright 2018 Analytics Zoo Authors. End of explanation """ # Install jdk8 !apt-get install openjdk-8-jdk-headless -qq > /dev/null import os # Set environment variable JAVA_HOME. os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" !update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java !java -version """ Explanation: Environment Preparation Install Java 8 Run the cell on the Google Colab to install jdk 1.8. Note: if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer). End of explanation """ import sys # Set current python version python_version = "3.7.10" # Install Miniconda !wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh !chmod +x Miniconda3-4.5.4-Linux-x86_64.sh !./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local # Update Conda !conda install --channel defaults conda python=$python_version --yes !conda update --channel defaults --all --yes # Append to the sys.path _ = (sys.path .append(f"/usr/local/lib/python3.7/site-packages")) os.environ['PYTHONHOME']="/usr/local" """ Explanation: Install Analytics Zoo Conda is needed to prepare the Python environment for running this example. Note: The following code cell is specific for setting up conda environment on Colab; for general conda installation, please refer to the install guide for more details. End of explanation """ # Install latest pre-release version of Analytics Zoo # Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies. !pip install --pre --upgrade analytics-zoo[ray] # Install python dependencies !pip install torch==1.7.1 torchvision==0.8.2 tensorboardx==2.2 """ Explanation: You can install the latest pre-release version using pip install --pre --upgrade analytics-zoo[ray]. End of explanation """ # import necesary libraries and modules from __future__ import print_function import os import argparse from zoo.orca import init_orca_context, stop_orca_context from zoo.orca import OrcaContext """ Explanation: Automated hyper-parameter search for PyTorch using Orca APIs In this guide we will describe how to enable automated hyper-parameter search for PyTorch using Orca AutoEstimator in 5 simple steps. End of explanation """ # recommended to set it to True when running Analytics Zoo in Jupyter notebook. OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook). 
cluster_mode = "local" if cluster_mode == "local": init_orca_context(cores=4, memory="2g", init_ray_on_spark=True) # run in local mode elif cluster_mode == "k8s": init_orca_context(cluster_mode="k8s", num_nodes=2, cores=4, init_ray_on_spark=True) # run on K8s cluster elif cluster_mode == "yarn": init_orca_context( cluster_mode="yarn-client", cores=4, num_nodes=2, memory="2g", init_ray_on_spark=True, driver_memory="10g", driver_cores=1) # run on Hadoop YARN cluster """ Explanation: Step 1: Init Orca Context End of explanation """ import torch import torch.nn as nn import torch.nn.functional as F class LeNet(nn.Module): def __init__(self, fc1_hidden_size=500): super(LeNet, self).__init__() self.conv1 = nn.Conv2d(1, 20, 5, 1) self.conv2 = nn.Conv2d(20, 50, 5, 1) self.fc1 = nn.Linear(4*4*50, fc1_hidden_size) self.fc2 = nn.Linear(fc1_hidden_size, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.max_pool2d(x, 2, 2) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2, 2) x = x.view(-1, 4*4*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return F.log_softmax(x, dim=1) criterion = nn.NLLLoss() """ Explanation: This is the only place where you need to specify local or distributed mode. View Orca Context for more details. Note: You should export HADOOP_CONF_DIR=/path/to/hadoop/conf/dir when you run on Hadoop YARN cluster. Step 2: Define the Model You may define your model, loss and optimizer in the same way as in any standard PyTorch program. End of explanation """ def model_creator(config): model = LeNet(fc1_hidden_size=config["fc1_hidden_size"]) return model def optim_creator(model, config): return torch.optim.Adam(model.parameters(), lr=config["lr"]) """ Explanation: After defining your model, you need to define a Model Creator Function that returns an instance of your model, and a Optimizer Creator Function that returns a PyTorch optimizer. Note that both the Model Creator Function and the Optimizer Creator Function should take config as input and get the hyper-parameter values from config. End of explanation """ import torch from torchvision import datasets, transforms torch.manual_seed(0) dir = '/tmp/dataset' test_batch_size = 640 def train_loader_creator(config): train_loader = torch.utils.data.DataLoader( datasets.MNIST(dir, train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=config["batch_size"], shuffle=True) return train_loader def test_loader_creator(config): test_loader = torch.utils.data.DataLoader( datasets.MNIST(dir, train=False, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=test_batch_size, shuffle=False) return test_loader """ Explanation: Step 3: Define Dataset You can define the train and validation datasets using Data Creator Function that has one parameter of config and returns a PyTorch DataLoader. End of explanation """ from zoo.orca.automl import hp search_space = { "fc1_hidden_size": hp.choice([500, 600]), "lr": hp.choice([0.001, 0.003]), "batch_size": hp.choice([160, 320, 640]), } """ Explanation: Step 4: Define search space You should define a dictionary as your hyper-parameter search space. The keys are hyper-parameter names which should be the same with those in your creators, and you can specify how you want to sample each hyper-parameter in the values of the search space. See automl.hp for more details. 
End of explanation """ from zoo.orca.automl.auto_estimator import AutoEstimator auto_est = AutoEstimator.from_torch(model_creator=model_creator, optimizer=optim_creator, loss=criterion, logs_dir="/tmp/zoo_automl_logs", resources_per_trial={"cpu": 2}, name="lenet_mnist") """ Explanation: Step 5: Automatically fit and search with Orca AutoEstimator First, create an AutoEstimator. You can refer to AutoEstimator API doc for more details. End of explanation """ auto_est.fit(data=train_loader_creator, validation_data=test_loader_creator, search_space=search_space, n_sampling=2, epochs=1, metric="accuracy") """ Explanation: Next, use the auto estimator to fit and search for the best hyper-parameter set. End of explanation """ best_model = auto_est.get_best_model() best_config = auto_est.get_best_config() print(best_config) """ Explanation: Finally, you can get the best learned model and the best hyper-parameters. End of explanation """ test_loader = test_loader_creator(best_config) best_model.eval() correct = 0 with torch.no_grad(): for data, target in test_loader: output = best_model(data) pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).sum().numpy() accuracy = 100. * correct / len(test_loader.dataset) print(f"accuracy is {accuracy}%") """ Explanation: You can use the best learned model and the best hyper-parameters as you want. Here, we demonstrate how to evaluate on the test dataset. End of explanation """ # stop orca context when program finishes stop_orca_context() """ Explanation: You can find the accuracy of the best model has reached 98%. End of explanation """
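# If you want to reuse the tuned network outside this notebook, the model returned by
# get_best_model() behaves like a regular PyTorch module (it was called on data above),
# so its weights can be saved with the standard torch.save API. The file name below is
# just an example, not something the AutoEstimator requires.
import torch

torch.save(best_model.state_dict(), "best_lenet_mnist.pt")

# To reload later, rebuild the network with the best hyper-parameters and load the weights.
reloaded = model_creator(best_config)
reloaded.load_state_dict(torch.load("best_lenet_mnist.pt"))
reloaded.eval()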
sdpython/ensae_teaching_cs
_doc/notebooks/td2a_ml/td2a_timeseries_correction.ipynb
mit
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
""" Explanation: 2A.ml - Time series - correction Making predictions on time series. End of explanation """
import pandas
data = pandas.read_csv("xavierdupre_sessions.csv", sep="\t")
data.set_index("Date", inplace=True)
data.head()

data.plot(figsize=(12,4));

data[-365:].plot(figsize=(12,4));
""" Explanation: A time series We retrieve the number of sessions of a website. End of explanation """
from statsmodels.tsa.tsatools import detrend
notrend = detrend(data['Sessions'])
data["notrend"] = notrend
data["trend"] = data['Sessions'] - notrend
data.tail()

data.plot(y=["Sessions", "notrend", "trend"], figsize=(14,4));
""" Explanation: Trends The detrend function. End of explanation """
notrend2 = detrend(data['Sessions'], order=2)
data["notrend2"] = notrend2
data["trend2"] = data["Sessions"] - data["notrend2"]
data.plot(y=["Sessions", "notrend2", "trend2"], figsize=(14,4));
""" Explanation: We try to estimate a trend by minimizing: $Y=\alpha + \beta t + \gamma t^2$. End of explanation """
import numpy
data["logSess"] = data["Sessions"].apply(lambda x: numpy.log(x+1))
lognotrend = detrend(data['logSess'])
data["lognotrend"] = lognotrend
data["logtrend"] = data["logSess"] - data["lognotrend"]
data.plot(y=["logSess", "lognotrend", "logtrend"], figsize=(14,4));
""" Explanation: We switch to the log scale. End of explanation """
from statsmodels.tsa.seasonal import seasonal_decompose
res = seasonal_decompose(data["Sessions"].values.ravel(), freq=7, two_sided=False)
data["season"] = res.seasonal
data["trendsea"] = res.trend
data.plot(y=["Sessions", "season", "trendsea"], figsize=(14,4));

data[-365:].plot(y=["Sessions", "season", "trendsea"], figsize=(14,4));

res = seasonal_decompose(data["Sessions"].values.ravel() + 1, freq=7, two_sided=False, model='multiplicative')
data["seasonp"] = res.seasonal
data["trendseap"] = res.trend
data[-365:].plot(y=["Sessions", "seasonp", "trendseap"], figsize=(14,4));
""" Explanation: The series is rather peculiar: it gives the impression of a change of regime. We extract the seasonal component with seasonal_decompose. End of explanation """
from seasonal import fit_seasons
cv_seasons, trend = fit_seasons(data["Sessions"])
print(cv_seasons)
# data["cs_seasons"] = cv_seasons
data["trendcs"] = trend
data[-365:].plot(y=["Sessions", "trendcs", "trendsea"], figsize=(14,4));
""" Explanation: Removing the seasonality without knowing it With fit_seasons. End of explanation """
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = plot_acf(data["Sessions"], lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = plot_pacf(data["Sessions"], lags=40, ax=ax2);
""" Explanation: Autocorrelogram We follow the example Autoregressive Moving Average (ARMA): Sunspots data. End of explanation """
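# Another simple way to smooth out the weekly pattern, complementary to
# seasonal_decompose above, is a 7-day rolling mean computed directly with pandas.
# This is just an illustrative sketch using the same 'Sessions' column.
data["rolling7"] = data["Sessions"].rolling(window=7).mean()
data[-365:].plot(y=["Sessions", "rolling7"], figsize=(14,4));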
evanmiltenburg/python-for-text-analysis
Chapters-colab/Chapter_11_Functions_and_scope.ipynb
apache-2.0
%%capture !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Data.zip !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/images.zip !wget https://github.com/cltl/python-for-text-analysis/raw/master/zips/Extra_Material.zip !unzip Data.zip -d ../ !unzip images.zip -d ./ !unzip Extra_Material.zip -d ../ !rm Data.zip !rm Extra_Material.zip !rm images.zip """ Explanation: <a href="https://colab.research.google.com/github/cltl/python-for-text-analysis/blob/colab/Chapters-colab/Chapter_11_Functions_and_scope.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> End of explanation """ print("Happy Birthday to you!") print("Happy Birthday to you!") print("Happy Birthday, dear Emily.") print("Happy Birthday to you!") """ Explanation: Chapter 11: Functions and scope We use an example from this website to show you some of the basics of writing a function. We use some materials from this other Python course. We have seen that Python has several built-in functions (e.g. print() or max()). But you can also create a function. A function is a reusable block of code that performs a specific task. Once you have defined a function, you can use it at any place in your Python script. You can even import a function from an external module (as we will see in the next chapter). Therefore, they are beneficial for tasks that you will perform more often. Plus, functions are a convenient way to order your code and make it more readable! At the end of this chapter, you will be able to: write a function work with function inputs understand the difference between (keyword and positional) arguments and parameters return zero, one, or multiple values write function docstrings understand the scope of variables store your function in a Python module and call it debug your functions If you want to learn more about these topics, you might find the following link useful: Tutorial: Defining Functions of your Own The docstrings main formats PEP 287 -- reStructured Docstring Format Introduction to assert Now let's get started! If you have questions about this chapter, please contact us (cltl.python.course@gmail.com). 1. Writing a function A function is an isolated chunk of code that has a name, gets zero or more parameters, and returns a value. In general, a function will do something for you based on the input parameters you pass it, and it will typically return a result. You are not limited to using functions available in the standard library or the ones provided by external parties. You can also write your own functions! Whenever you are writing a function, you need to think of the following things: * What is the purpose of the function? * How should I name the function? * What input does the function need? * What output should the function generate? 1.1. Why use a function? There are several good reasons why functions are a vital component of any non-ridiculous programmer: encapsulation: wrapping a piece of useful code into a function so that it can be used without knowledge of the specifics generalization: making a piece of code useful in varied circumstances through parameters manageability: Dividing a complex program up into easy-to-manage chunks maintainability: using meaningful names to make the program better readable and understandable reusability: a good function may be useful in multiple programs recursion! 1.2. How to define a function Let's say we want to sing a birthday song to Emily. 
Then we print the following lines: End of explanation """ def happy_birthday_to_emily(): # Function definition """ Print a birthday song to Emily. """ print("Happy Birthday to you!") print("Happy Birthday to you!") print("Happy Birthday, dear Emily.") print("Happy Birthday to you!") """ Explanation: This could be the purpose of a function: to print the lines of a birthday song for Emily. Now, we define a function to do this. Here is how you define a function: write def; the name you would like to call your function; a set of parentheses containing the parameter(s) of your function; a colon; a docstring describing what your function does; the function definition; ending with a return statement Statements must be indented so that Python knows what belongs in the function and what not. Functions are only executed when you call them. It is good practice to define your functions at the top of your program or in another Python module. We give the function a clear name, happy_birthday_to_emily, and we define the function as shown below. Note that we specify what it does in the docstring at the beginning of the function: End of explanation """ # function definition: def happy_birthday_to_emily(): # Function definition """ Print a birthday song to Emily. """ print("Happy Birthday to you!") print("Happy Birthday to you!") print("Happy Birthday, dear Emily.") print("Happy Birthday to you!") # function call: print('Function call 1') happy_birthday_to_emily() print() # We can call the function as many times as we want (but we define it only once) print('Function call 2') happy_birthday_to_emily() print() print('Function call 3') happy_birthday_to_emily() print() # This will not call the function print('This is not a function call') happy_birthday_to_emily """ Explanation: If we execute the code above, we don't get any output. That's because we only told Python: "Here's a function to do this, please remember it." If we actually want Python to execute everything inside this function, we have to call it: 1.3 How to call a function It is important to distinguish between a function definition and a function call. We illustrate this in 1.3.1. You can also call functions from within other functions. This will become useful when you split up your code into small chunks that can be combined to solve a larger problem. This is illustrated in 1.3.2. 1.3.1) A simple function call A function is defined once. After the definition, Python has remembered what this function does in its memory. A function is executed/called as many times as we like. When calling a function, you should always use parenthesis. End of explanation """ def new_line(): """Print a new line.""" print() def two_new_lines(): """Print two new lines.""" new_line() new_line() print("Printing a single line...") new_line() print("Printing two lines...") two_new_lines() print("Printed two lines") """ Explanation: 1.3.2 Calling a function from within another function We can also define functions that call other functions, which is very helpful if we want to split our task into smaller, more manageable subtasks: End of explanation """ help(happy_birthday_to_emily) type(happy_birthday_to_emily) """ Explanation: You can do the same tricks that we learnt to apply on the built-in functions, like asking for help or for a function type: End of explanation """ # function definition with using the parameter `name' def happy_birthday(name): """ Print a birthday song with the "name" of the person inserted. 
""" print("Happy Birthday to you!") print("Happy Birthday to you!") print(f"Happy Birthday, dear {name}.") print("Happy Birthday to you!") # function call using specifying the value of the argument happy_birthday("James") """ Explanation: The help we get on a function will become more interesting once we learn about function inputs and outputs ;-) 1.4 Working with function input 1.4.1 Parameters and arguments We use parameters and arguments to make a function execute a task depending on the input we provide. For instance, we can change the function above to input the name of a person and print a birthday song using this name. This results in a more generic function. To understand how we use parameters and arguments, keep in mind the distinction between function definition and function call. Parameter: The variable name in the function definition below is a parameter. Variables used in function definitions are called parameters. Argument: The variable my_name in the function call below is a value for the parameter name at the time when the function is called. We refer to such variables as arguments. We use arguments so we can direct the function to do different kinds of work when we call it at different times. End of explanation """ my_name="James" happy_birthday(my_name) """ Explanation: We can also store the name in a variable: End of explanation """ happy_birthday() """ Explanation: If we forgot to specify the name, we get an error: End of explanation """ def multiply(x, y): """Multiply two numeric values.""" result = x * y print(result) multiply(2020,5278238) multiply(2,3) """ Explanation: Functions can have multiple parameters. We can for example multiply two numbers in a function (using the two parameters x and y) and then call the function by giving it two arguments: End of explanation """ def multiply(x, y, third_number=1): # x and y are positional parameters, third_number is a keyword parameter """Multiply two or three numbers and print the result.""" result=x*y*third_number print(result) multiply(2,3) # We only specify values for the positional parameters multiply(2,3,third_number=4) # We specify values for both the positional parameters, and the keyword parameter """ Explanation: 1.4.2 Positional vs keyword parameters and arguments The function definition tells Python which parameters are positional and which are keyword. As you might remember, positional means that you have to give an argument for that parameter; keyword means that you can give an argument value, but this is not necessary because there is a default value. 
So, to summarize these two notes, we distinguish between: 1) positional parameters: (we indicate these when defining a function, and they are compulsory when calling the function) 2) keyword parameters: (we indicate these when defining a function, but they have a default value - and are optional when calling the function) For example, if we want to have a function that can either multiply two or three numbers, we can make the third parameter a keyword parameter with a default of 1 (remember that any number multiplied with 1 results in that number): End of explanation """ multiply(3) """ Explanation: If we do not specify a value for a positional parameter, the function call will fail (with a very helpful error message): End of explanation """ def multiply(x, y): """Multiply two numbers and return the result.""" multiplied = x * y return multiplied #here we assign the returned value to variable z result = multiply(2, 5) print(result) """ Explanation: 1.5 Output: the return statement Functions can have a return statement. The return statement returns a value back to the caller and always ends the execution of the function. This also allows us to use the result of a function outside of that function by assigning it to a variable: End of explanation """ print(multiply(30,20)) """ Explanation: We can also print the result directly (without assigning it to a variable), which gives us the same effect as using the print statements we used before: End of explanation """ def multiply_no_return(x, y): """Multiply two numbers and does not return the result.""" result = x * y is_this_a_result = multiply_no_return(2,3) print(is_this_a_result) """ Explanation: If we assign the result to a variable, but do not use the return statement, the function cannot return it. Instead, it returns None (as you can try out below). This is important to realize: even functions without a return statement do return a value, albeit a rather boring one. This value is called None (it’s a built-in name). You have seen this already with list methods - for example list.append(val) adds a value to a list, but does not return anything explicitly. End of explanation """ def calculate(x,y): """Calculate product and sum of two numbers.""" product = x * y summed = x + y #we return a tuple of values return product, summed # the function returned a tuple and we unpack it to var1 and var2 var1, var2 = calculate(10,5) print("product:",var1,"sum:",var2) """ Explanation: Returning multiple values Similarly as the input, a function can also return multiple values as output. We call such a collection of values a tuple (does this term sound familiar ;-)?). End of explanation """ #this will assign `var` to a tuple: var = calculate(10,5) print(var) #this will generate an error var1, var2, var3 = calculate(10,5) """ Explanation: Make sure you actually save your 2 values into 2 variables, or else you end up with errors or unexpected behavior: End of explanation """ def sum_and_diff_len_strings(string1, string2): """ Return the sum of and difference between the lengths of two strings. 
""" sum_strings = len(string1) + len(string2) diff_strings = len(string1) - len(string2) return sum_strings, diff_strings sum_strings, diff_strings = sum_and_diff_len_strings("horse", "dog") print("Sum:", sum_strings) print("Difference:", diff_strings) """ Explanation: Saving the resulting values in different variables can be useful when you want to use them in different places in your code: End of explanation """ def my_function(param1, param2): """ This is a reST style. :param param1: this is a first param :param param2: this is a second param :returns: this is a description of what is returned """ return """ Explanation: 1.6 Documenting your functions with docstrings Docstring is a string that occurs as the first statement in a function definition. For consistency, always use """triple double quotes""" around docstrings. Triple quotes are used even though the string fits on one line. This makes it easy to expand it later. There's no blank line either before or after the docstring. The docstring is a phrase ending in a period. It prescribes the function or method's effect as a command ("Do this", "Return that"), not as a description; e.g., don't write "Returns the pathname ...". In practice, there are several formats for writing docstrings, and all of them contain more information than the single sentence description we mention here. Probably the most well-known format is reStructured Text. Here is an example of a function description in reStructured Text (reST): End of explanation """ def is_even(p): """Check whether a number is even.""" if p % 2 == 1: return False else: return True """ Explanation: You can see that this docstring describes the function goal, its parameters, its outputs, and the errors it raises. It is a good practice to write a docstring for your functions, so we will always do this! For now we will stick with single-sentence docstrings You can read more about this topic here, here, and here. 1.7 Debugging a function Sometimes, it can hard to write a function that works perfectly. A common practice in programming is to check whether the function performs as you expect it to do. The assert statement is one way of debugging your function. The syntax is as follows: assert code == your expected output,message to show when code does not work as you'd expected Let's try this on our simple function. End of explanation """ input_value = 2 expected_output = True actual_output = is_even(input_value) assert actual_output == expected_output, f'expected {expected_output}, got {actual_output}' """ Explanation: If the function output is what you expect, Python will show nothing. End of explanation """ def is_even(p): """Check whether a number is even.""" if p % 2 == 1: return False else: return False input_value = 2 expected_output = True actual_output = is_even(input_value) assert actual_output == expected_output, f'expected {expected_output}, got {actual_output}' """ Explanation: However, when the actual output is different from what we expected, we got an error. Let's say we made a mistake in writing the function. End of explanation """ from utils_chapter11 import happy_birthday happy_birthday('George') from utils_chapter11 import multiply multiply(1,2) from utils_chapter11 import is_even is_it_even = is_even(5) print(is_it_even) """ Explanation: 1.8 Storing a function in a Python module Since Python functions are nice blocks of code with a clear focus, wouldn't it be nice if we can store them in a file? 
By doing this, we make our code visually very appealing since we are only left with functions calls instead of function definitions. Please open the file utils_chapter11.py (is in the same folder as the notebook you are now reading). In it, you will find three of the functions that we've shown so far in this notebook. So, how can we use those functions? We can import the function using the following syntax: from NAME OF FILE WITHOUT .PY import function name End of explanation """ def setx(): """Set the value of a variable to 1.""" x = 1 setx() print(x) """ Explanation: 2. Variable scope Please note: scope is a hard concept to grasp, but we think it is important to introduce it here. We will do our best to repeat it during the course. Any variables you declare in a function, as well as the arguments that are passed to a function will only exist within the scope of that function, i.e., inside the function itself. The following code will produce an error, because the variable x does not exist outside of the function: End of explanation """ def setx(): """Set the value of a variable to 1.""" x = 1 return x setx() print(x) """ Explanation: Even when we return x, it does not exist outside of the function: End of explanation """ x = 0 def setx(): """Set the value of a variable to 1.""" x = 1 setx() print(x) """ Explanation: Also consider this: End of explanation """ x = 1 def getx(): """Print the value of a variable x.""" print(x) getx() """ Explanation: In fact, this code has produced two completely unrelated x's! So, you can not read a local variable outside of the local context. Nevertheless, it is possible to read a global variable from within a function, in a strictly read-only fashion. End of explanation """ a=3 b=2 def setb(): """Set the value of a variable b to 11.""" b=11 c=20 print("Is 'a' defined locally in the function:", 'a' in locals()) print("Is 'b' defined locally in the function:", 'b' in locals()) print("Is 'b' defined globally:", 'b' in globals()) setb() print("Is 'a' defined globally:", 'a' in globals()) print("Is 'b' defined globally:", 'b' in globals()) print("Is 'c' defined globally:", 'c' in globals()) """ Explanation: You can use two built-in functions in Python when you are unsure whether a variable is local or global. The function locals() returns a list of all local variables, and the function globals() - a list of all global variables. Note that there are many non-interesting system variables that these functions return, so in practice it is best to check for membership with the in operator. For example: End of explanation """ def setb_again(): """Set the value of a variable to 3.""" b=3 print("in 'setb_again' b =", b) def setb(): """Set the value of a variable b to 2.""" b=2 setb_again() print("in 'setb' b =", b) b=1 setb() print("global b =", b) """ Explanation: Finally, note that the local context stays local to the function, and is not shared even with other functions called within a function, for example: End of explanation """ # you code here """ Explanation: We call the function setb() from the global context, and we call the function setb_again() from the context of the function setb(). The variable b in the function setb_again() is set to 3, but this does not affect the value of this variable in the function setb() which is still 2. And as we saw before, the changes in setb() do not influence the value of the global variable (b=1). Exercises Exercise 1: Write a function that converts meters to centimeters and prints the resulting value. 
End of explanation """ # function to modify: def multiply(x, y, third_number=1): """Multiply two or three numbers and print the result.""" result=x*y*third_number print(result) """ Explanation: Exercise 2: Add another keyword parameter message to the multiply function, which will allow a user to print a message. The default value of this keyword parameter should be an empty string. Test this with 2 messages of your choice. Also test it without specifying a value for the keyword argument when calling a function. End of explanation """ def new_line(): """Print a new line.""" print() # you code here """ Explanation: Exercise 3: Write a function called multiple_new_lines which takes as argument an integer and prints that many newlines by calling the function newLine. End of explanation """ def happy_birthday_to_you(): # your code here # original function - replace the print statements by the happy_birthday_to_you() function: def happy_birthday(name): """ Print a birthday song with the "name" of the person inserted. """ print("Happy Birthday to you!") print("Happy Birthday to you!") print("Happy Birthday, dear " + name + ".") print("Happy Birthday to you!") """ Explanation: Exercise 4: Let's refactor the happy birthday function to have no repetition. Note that previously we print "Happy birthday to you!" three times. Make another function happy_birthday_to_you() that only prints this line and call it inside the function happy_birthday(name). End of explanation """ def multiply(x, y, third_number=1): """Multiply two or three numbers and print the result.""" result=x*y*third_number return result print(multiply(1+1,6-2)) print(multiply(multiply(4,2),multiply(2,5))) print(len(str(multiply(10,100)))) """ Explanation: Exercise 5: Try to figure out what is going on in the following examples. How does Python deal with the order of calling functions? End of explanation """ def switch_two_values(x,y): # your code here a='orange' b='apple' a,b = switch_two_values(a,b) # `a` should contain "apple" after this call, and `b` should contain "orange" print(a,b) """ Explanation: Exercise 6: Complete this code to switch the values of two variables: End of explanation """
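# One possible way to complete Exercise 6, shown here as a worked example of a
# function that returns multiple values (see section 1.5 above). The function simply
# returns its arguments in reversed order, and tuple unpacking does the rest.
def switch_two_values(x, y):
    """Return the two arguments in switched order."""
    return y, x

a = 'orange'
b = 'apple'
a, b = switch_two_values(a, b)
print(a, b)   # prints: apple orange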
JohnGriffiths/ConWhAt
docs/examples/exploring_conwhat_atlases.ipynb
bsd-3-clause
# ConWhAt stuff from conwhat import VolConnAtlas,StreamConnAtlas,VolTractAtlas,StreamTractAtlas from conwhat.viz.volume import plot_vol_scatter,plot_vol_and_rois_nilearn # Neuroimaging stuff import nibabel as nib from nilearn.plotting import plot_stat_map,plot_surf_roi # Viz stuff %matplotlib inline from matplotlib import pyplot as plt import seaborn as sns # Generic stuff import glob, numpy as np, pandas as pd, networkx as nx """ Explanation: Exploring ConWhAt Atlases There are four different atlas types in ConWhat, corresponding to the 2 ontology types (Tract-based / Connectivity-Based) and 2 representation types (Volumetric / Streamlinetric). (More on this schema here) End of explanation """ atlas_dir = '/scratch/hpc3230/Data/conwhat_atlases' atlas_name = 'CWL2k8Sc33Vol3d100s_v01' """ Explanation: We'll start with the scale 33 lausanne 2008 volumetric connectivity-based atlas. Define the atlas name and top-level directory location End of explanation """ vca = VolConnAtlas(atlas_dir=atlas_dir + '/' + atlas_name, atlas_name=atlas_name) """ Explanation: Initialize the atlas class End of explanation """ vca.atlas_name vca.atlas_dir """ Explanation: This atlas object contains various pieces of general information End of explanation """ vca.vfms.head() """ Explanation: Information about each atlas entry is contained in the vfms attribute, which returns a pandas dataframe End of explanation """ vca.Gnx.edges[(10,35)] """ Explanation: Additionally, connectivity-based atlases also contain a networkx graph object vca.Gnx, which contains information about each connectome edge End of explanation """ img = vca.get_vol_from_vfm(1637) plot_stat_map(img) """ Explanation: Individual atlas entry nifti images can be grabbed like so End of explanation """ vca.bbox.ix[1637] ax = plot_vol_scatter(vca.get_vol_from_vfm(1),c='r',bg_img='nilearn_destrieux', bg_params={'s': 0.1, 'c':'k'},figsize=(20, 15)) ax.set_xlim([0,200]); ax.set_ylim([0,200]); ax.set_zlim([0,200]); """ Explanation: Or alternatively as a 3D scatter plot, along with the x,y,z bounding box End of explanation """ fig, ax = plt.subplots(figsize=(16,12)) sns.heatmap(np.log1p(vca.weights),xticklabels=vca.region_labels, yticklabels=vca.region_labels,ax=ax); plt.tight_layout() """ Explanation: We can also view the weights matrix like so: End of explanation """ vca.cortex """ Explanation: The vca object also contains x,y,z bounding boxes for each structure We also stored additional useful information about the ROIs in the associated parcellation, including cortical/subcortical labels End of explanation """ vca.hemispheres """ Explanation: ...hemisphere labels End of explanation """ vca.region_mapping_fsav_lh vca.region_mapping_fsav_rh """ Explanation: ...and region mappings to freesurfer's fsaverage brain End of explanation """ f = '/opt/freesurfer/freesurfer/subjects/fsaverage/surf/lh.inflated' vtx,tri = nib.freesurfer.read_geometry(f) plot_surf_roi([vtx,tri],vca.region_mapping_fsav_lh); """ Explanation: which can be used for, e.g. plotting ROI data on a surface End of explanation """
birdsarah/bokeh-miscellany
0.12.14 bugs - test in 0.12.5.ipynb
gpl-2.0
# Imports needed for the cells below (bokeh 0.12.x API):
import numpy as np
from bokeh.plotting import figure, show, save
from bokeh.io import output_notebook
output_notebook()

N = 10000
x = np.random.normal(0, np.pi, N)
y = np.sin(x) + np.random.normal(0, 0.2, N)

p = figure(webgl=True)
p.scatter(x, y, alpha=0.1)
show(p)
"""
Explanation: WebGL problems
When dragging, the canvas is shifted down: it is cut off at the top and spills over the bottom.
Bad in 0.12.14 and 0.12.15dev3
Good in 0.12.10
End of explanation
"""

!conda list | egrep "jupyter|notebook"

p = figure(plot_height=200, sizing_mode='scale_width')
p.scatter(x, y, alpha=0.1)
show(p)

N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
    "#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]

TOOLS="hover,crosshair,pan,wheel_zoom,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select,"

p = figure(tools=TOOLS, sizing_mode='scale_width')
p.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
show(p)

save(p, 'color_scatter.html')
"""
Explanation: Responsive in notebook
The responsive plot spills a scroll bar inside the notebook. Not a problem in a plain saved HTML file.
Bad in 0.12.6, 0.12.7, 0.12.9, 0.12.10, 0.12.14, 0.12.15dev3
Good in 0.12.5
End of explanation
"""
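# When reproducing these version comparisons it helps to record the exact
# installed version in the notebook itself (a small sketch):
import bokeh
print(bokeh.__version__)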
exa-analytics/atomic
docs/source/notebooks/01_basics.ipynb
apache-2.0
import exatomic exatomic.__version__ """ Explanation: Welcome to exatomic This notebook demonstrates some basics of working with exatomic. End of explanation """ exatomic.Universe? """ Explanation: Getting help in the Jupyter notebook is easy, just put a "?" after a class or function. Don't forget to use tab to help with syntax completion End of explanation """ uni = exatomic.Universe() uni """ Explanation: The Universe object contains all of the information about a simulation, nuclear coordinates, orbitals, etc. Data is stored in pandas DataFrames (see pandas for more information) End of explanation """ atom = exatomic.Atom.from_dict({'x': [0.0, 0.0], 'y': [0.0, 0.0], 'z': [-0.34, 0.34], 'symbol': ["H", "H"], 'frame': [0, 0]}) uni = exatomic.Universe(atom=atom) uni.atom """ Explanation: Empty universes can be useful...but it is more interesting with data Note that exatomic uses Hartree atomic units End of explanation """ uni.frame # This was computed on-the-fly as we didn't instantiate it above """ Explanation: The frame column is how we track state (e.g. time, theory, etc.) The simplest dataframe is the frame object which by default only contains the number of atoms End of explanation """ exatomic.UniverseWidget(uni) """ Explanation: Visualization of this simple universe can be accomplished directly in the notebook End of explanation """ uni.atom_two """ Explanation: In building the visualization, bonds were automatically computed For small systems this is the default behavior, but for large systems it is not End of explanation """ uni.molecule """ Explanation: Note again that distances are in atomic units End of explanation """ uni.compute_molecule_com() uni.molecule """ Explanation: Center of masses can also be computed End of explanation """
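# The coordinates above are in Bohr (Hartree atomic units). A quick conversion
# to Angstroms -- shown as a sketch, assuming uni.atom can be sliced like the
# regular pandas DataFrame it is displayed as -- is a simple scaling:
uni.atom[['x', 'y', 'z']] * 0.529177   # 1 Bohr ~= 0.529177 Angstrom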
mjuenema/ipython-notebooks
dnspython-resolver.ipynb
bsd-2-clause
import dns.rdataclass
dns.rdataclass.IN
"""
Explanation: dnspython Resolver
The socket module of the Python standard library provides basic functions for resolving hostnames (gethostbyname and gethostbyname_ex) as implemented by the C library. The resolver in dnspython allows you to bypass the C library and query DNS servers directly.
pip install dnspython
Constants
Before we start querying DNS servers it is useful to know about some constants and convenience functions dnspython provides.
The dns.rdataclass module defines the DNS data classes, of which only Internet IN is relevant (or has anyone ever seen CHAOS or HESIOD implemented???).
End of explanation
"""

import dns.rdatatype
dns.rdatatype.A, dns.rdatatype.MX, dns.rdatatype.PTR, dns.rdatatype.SOA, dns.rdatatype.TXT
"""
Explanation: The constants of the dns.rdatatype module are a lot more important, as they list the types of records commonly served by DNS servers.
End of explanation
"""

import dns.resolver

dns.resolver.get_default_resolver().nameservers

dns.resolver.get_default_resolver().domain

dns.resolver.get_default_resolver().search

dns.resolver.get_default_resolver().timeout
"""
Explanation: Using the default resolvers
On Unix systems the file /etc/resolv.conf may list (on Linux up to three) nameservers that the C library functions shall query for resolving host names that are not found in /etc/hosts or other local databases like LDAP or NIS, as defined in /etc/nsswitch.conf.
Unless one requires exact control over which DNS server(s) to query, the dns.resolver.query() function (actually a wrapper for the more versatile dns.resolver.Resolver.query() method) is the right choice. It will simply use the servers and other settings configured in /etc/resolv.conf. The function dns.resolver.get_default_resolver() returns the details.
Check the resolv.conf(5) manual page for more information.
End of explanation
"""

answers = dns.resolver.query('www.google.com')
answers

answers.canonical_name.to_text(), answers.canonical_name.to_unicode()

answers.expiration

answers.qname

answers.qname.to_text(), answers.qname.to_unicode()

answers.rdclass == dns.rdataclass.IN

answers.rdtype == dns.rdatatype.A

answers.response

answers.response.edns

answers.response.time

answers.response.flags

# https://tools.ietf.org/html/rfc1035 4.1.1. Header section format
answers.response.flags & 0b1000000000000000  # 0=query, 1=response

answers.response.flags & 0b0000010000000000  # 1=authoritative

answers.response.flags & 0b0000001000000000  # 1=truncated

answers.response.flags & 0b0000000100000000  # 1=recursion desired (copied into response)

answers.response.flags & 0b0000000010000000  # 1=recursion available

answers.response.rcode()  # errors?

answers.rrset

len(answers.rrset)

list(answers.rrset)

answers.rrset[0].address

answers.rrset[0].rdclass == dns.rdataclass.IN

answers.rrset[0].rdtype == dns.rdatatype.A
"""
Explanation: Querying A records
The dns.resolver.query() function accepts a number of arguments, but in most cases only the first one (qname) or two (rdtype) are required. Note how the returned value for an A query (the default) contains much more information than just the IP address.
End of explanation
"""

answers = dns.resolver.query('google.com', 'MX')
len(answers.rrset)

answers.rrset[0]

answers.rrset[0].exchange.to_text()

answers.rrset[0].preference

answers.rrset[0].rdtype == dns.rdatatype.MX
"""
Explanation: Querying other record types
Of course one can query record types other than A. 
The second argument to dns.resolver.query accepts the contants defined in dns.rdatatype or simply a string value. The attributes of the returned records are specific ot the queried type, e.g. MX records have a preference attribute. Example: Mail Exchanger (MX) records End of explanation """ answers = dns.resolver.query('google.com', 'SOA') len(answers.rrset) answers.rrset[0] answers.rrset[0].mname.to_text() answers.rrset[0].serial answers.rrset[0].refresh """ Explanation: Example: Start of Authority (SOA) record End of explanation """ answers = dns.resolver.query('4.4.8.8.in-addr.arpa', 'PTR') len(answers) answers.rrset[0] answers.rrset[0].to_text() """ Explanation: Example: PTR record End of explanation """ answers = dns.resolver.query('_http._tcp.juenemann.net', 'SRV') len(answers) answers.rrset[0] answers.rrset[0].to_text() answers.rrset[0].target.to_text() answers.rrset[0].priority, answers.rrset[0].weight, answers.rrset[0].port """ Explanation: Example SRV record End of explanation """ resolver = dns.resolver.Resolver() resolver.nameservers = ['208.67.222.222', '208.67.220.220'] resolver.nameservers answers = resolver.query('google.com', 'NS') list(answers.rrset) """ Explanation: Querying specific DNS server(s) There are cases where one does not want to use alternative settings to those configured in /etc/resolv.conf. For this purpose one has to create and customise an instance of the dns.resolver.Resolver class. Example: Querying the OpenDNS servers End of explanation """ resolver = dns.resolver.Resolver() resolver.nameservers = ['208.67.222.222', '208.67.220.220'] resolver.nameservers resolver.set_flags(0b0000000000000000) # Clear all flags, including 'recursive desired' resolver.flags try: answers = resolver.query('www.google.com', 'A') except Exception as e: print e answers = resolver.query('www.opendns.com', 'A') list(answers.rrset) """ Explanation: Example: Disallow recursion In this example the query flags are manipulated to disallow recursion. This will fail if the queried DNS servers are not authorative for the domain. End of explanation """
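# For PTR lookups, dnspython can also construct the reverse name for you,
# instead of hand-writing '4.4.8.8.in-addr.arpa' as above. A small sketch
# (the answer obviously depends on what the queried servers return):
import dns.reversename

rev_name = dns.reversename.from_address('8.8.4.4')
rev_name.to_text()

answers = dns.resolver.query(rev_name, 'PTR')
answers.rrset[0].to_text()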
Benedicto/ML-Learning
Clustering_0_nearest-neighbors-features-and-metrics_blank.ipynb
gpl-3.0
import graphlab import matplotlib.pyplot as plt import numpy as np %matplotlib inline """ Explanation: Nearest Neighbors When exploring a large set of documents -- such as Wikipedia, news articles, StackOverflow, etc. -- it can be useful to get a list of related material. To find relevant documents you typically * Decide on a notion of similarity * Find the documents that are most similar In the assignment you will * Gain intuition for different notions of similarity and practice finding similar documents. * Explore the tradeoffs with representing documents using raw word counts and TF-IDF * Explore the behavior of different distance metrics by looking at the Wikipedia pages most similar to President Obama’s page. Note to Amazon EC2 users: To conserve memory, make sure to stop all the other notebooks before running this notebook. Import necessary packages As usual we need to first import the Python packages that we will need. End of explanation """ wiki = graphlab.SFrame('people_wiki.gl') wiki """ Explanation: Load Wikipedia dataset We will be using the same dataset of Wikipedia pages that we used in the Machine Learning Foundations course (Course 1). Each element of the dataset consists of a link to the wikipedia article, the name of the person, and the text of the article (in lowercase). End of explanation """ wiki['word_count'] = graphlab.text_analytics.count_words(wiki['text']) wiki """ Explanation: Extract word count vectors As we have seen in Course 1, we can extract word count vectors using a GraphLab utility function. We add this as a column in wiki. End of explanation """ model = graphlab.nearest_neighbors.create(wiki, label='name', features=['word_count'], method='brute_force', distance='euclidean') """ Explanation: Find nearest neighbors Let's start by finding the nearest neighbors of the Barack Obama page using the word count vectors to represent the articles and Euclidean distance to measure distance. For this, again will we use a GraphLab Create implementation of nearest neighbor search. End of explanation """ model.query(wiki[wiki['name']=='Barack Obama'], label='name', k=10) """ Explanation: Let's look at the top 10 nearest neighbors by performing the following query: End of explanation """ def top_words(name): """ Get a table of the most frequent words in the given person's wikipedia page. """ row = wiki[wiki['name'] == name] word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count']) return word_count_table.sort('count', ascending=False) obama_words = top_words('Barack Obama') obama_words barrio_words = top_words('Francisco Barrio') barrio_words """ Explanation: All of the 10 people are politicians, but about half of them have rather tenuous connections with Obama, other than the fact that they are politicians. Francisco Barrio is a Mexican politician, and a former governor of Chihuahua. Walter Mondale and Don Bonker are Democrats who made their career in late 1970s. Wynn Normington Hugh-Jones is a former British diplomat and Liberal Party official. Andy Anstett is a former politician in Manitoba, Canada. Nearest neighbors with raw word counts got some things right, showing all politicians in the query result, but missed finer and important details. For instance, let's find out why Francisco Barrio was considered a close neighbor of Obama. 
To do this, let's look at the most frequently used words in each of Barack Obama and Francisco Barrio's pages: End of explanation """ combined_words = obama_words.join(barrio_words, on='word') combined_words """ Explanation: Let's extract the list of most frequent words that appear in both Obama's and Barrio's documents. We've so far sorted all words from Obama and Barrio's articles by their word frequencies. We will now use a dataframe operation known as join. The join operation is very useful when it comes to playing around with data: it lets you combine the content of two tables using a shared column (in this case, the word column). See the documentation for more details. For instance, running obama_words.join(barrio_words, on='word') will extract the rows from both tables that correspond to the common words. End of explanation """ combined_words = combined_words.rename({'count':'Obama', 'count.1':'Barrio'}) combined_words """ Explanation: Since both tables contained the column named count, SFrame automatically renamed one of them to prevent confusion. Let's rename the columns to tell which one is for which. By inspection, we see that the first column (count) is for Obama and the second (count.1) for Barrio. End of explanation """ combined_words.sort('Obama', ascending=False) """ Explanation: Note. The join operation does not enforce any particular ordering on the shared column. So to obtain, say, the five common words that appear most often in Obama's article, sort the combined table by the Obama column. Don't forget ascending=False to display largest counts first. End of explanation """ top_5_words = combined_words.sort('Obama', ascending=False)[0:5]['word'] common_words = set(top_5_words) def has_top_words(word_count_vector): for word in common_words: if(word not in word_count_vector): return False return True # YOUR CODE HERE wiki['has_top_words'] = wiki['word_count'].apply(has_top_words) # use has_top_words column to answer the quiz question wiki['has_top_words'].sum() # YOUR CODE HERE """ Explanation: Quiz Question. Among the words that appear in both Barack Obama and Francisco Barrio, take the 5 that appear most frequently in Obama. How many of the articles in the Wikipedia dataset contain all of those 5 words? Hint: * Refer to the previous paragraph for finding the words that appear in both articles. Sort the common words by their frequencies in Obama's article and take the largest five. * Each word count vector is a Python dictionary. For each word count vector in SFrame, you'd have to check if the set of the 5 common words is a subset of the keys of the word count vector. Complete the function has_top_words to accomplish the task. - Convert the list of top 5 words into set using the syntax set(common_words) where common_words is a Python list. See this link if you're curious about Python sets. - Extract the list of keys of the word count dictionary by calling the keys() method. - Convert the list of keys into a set as well. - Use issubset() method to check if all 5 words are among the keys. * Now apply the has_top_words function on every row of the SFrame. * Compute the sum of the result column to obtain the number of articles containing all the 5 top words. End of explanation """ print 'Output from your function:', has_top_words(wiki[32]['word_count']) print 'Correct output: True' print 'Also check the length of unique_words. 
It should be 167' print 'Output from your function:', has_top_words(wiki[33]['word_count']) print 'Correct output: False' print 'Also check the length of unique_words. It should be 188' """ Explanation: Checkpoint. Check your has_top_words function on two random articles: End of explanation """ obama_word_count = wiki[wiki['name']=='Barack Obama']['word_count'][0] bush_word_count = wiki[wiki['name']=='George W. Bush']['word_count'][0] biden_word_count = wiki[wiki['name']=='Joe Biden']['word_count'][0] from graphlab.toolkits.distances import euclidean print euclidean(obama_word_count, biden_word_count) print euclidean(bush_word_count, biden_word_count) print euclidean(obama_word_count, bush_word_count) """ Explanation: Quiz Question. Measure the pairwise distance between the Wikipedia pages of Barack Obama, George W. Bush, and Joe Biden. Which of the three pairs has the smallest distance? Hint: To compute the Euclidean distance between two dictionaries, use graphlab.toolkits.distances.euclidean. Refer to this link for usage. End of explanation """ bush_words = top_words('George W. Bush') obama_bush = bush_words.join(obama_words, on='word').rename({'count': 'bush', 'count.1': 'obama'}) obama_bush.sort('obama', ascending=False)[0:10]['word'] """ Explanation: Quiz Question. Collect all words that appear both in Barack Obama and George W. Bush pages. Out of those words, find the 10 words that show up most often in Obama's page. End of explanation """ wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['word_count']) model_tf_idf = graphlab.nearest_neighbors.create(wiki, label='name', features=['tf_idf'], method='brute_force', distance='euclidean') model_tf_idf.query(wiki[wiki['name'] == 'Barack Obama'], label='name', k=10) """ Explanation: Note. Even though common words are swamping out important subtle differences, commonalities in rarer political words still matter on the margin. This is why politicians are being listed in the query result instead of musicians, for example. In the next subsection, we will introduce a different metric that will place greater emphasis on those rarer words. TF-IDF to the rescue Much of the perceived commonalities between Obama and Barrio were due to occurrences of extremely frequent words, such as "the", "and", and "his". So nearest neighbors is recommending plausible results sometimes for the wrong reasons. To retrieve articles that are more relevant, we should focus more on rare words that don't happen in every article. TF-IDF (term frequency–inverse document frequency) is a feature representation that penalizes words that are too common. Let's use GraphLab Create's implementation of TF-IDF and repeat the search for the 10 nearest neighbors of Barack Obama: End of explanation """ def top_words_tf_idf(name): row = wiki[wiki['name'] == name] word_count_table = row[['tf_idf']].stack('tf_idf', new_column_name=['word','weight']) return word_count_table.sort('weight', ascending=False) obama_tf_idf = top_words_tf_idf('Barack Obama') obama_tf_idf schiliro_tf_idf = top_words_tf_idf('Phil Schiliro') schiliro_tf_idf """ Explanation: Let's determine whether this list makes sense. * With a notable exception of Roland Grossenbacher, the other 8 are all American politicians who are contemporaries of Barack Obama. * Phil Schiliro, Jesse Lee, Samantha Power, and Eric Stern worked for Obama. Clearly, the results are more plausible with the use of TF-IDF. Let's take a look at the word vector for Obama and Schilirio's pages. 
Notice that TF-IDF representation assigns a weight to each word. This weight captures relative importance of that word in the document. Let us sort the words in Obama's article by their TF-IDF weights; we do the same for Schiliro's article as well. End of explanation """ common_tf_idf = obama_tf_idf.join(schiliro_tf_idf, on='word').rename({'weight':'obama', 'weight.1':'schiliro'}).sort('obama', ascending=False) common_tf_idf """ Explanation: Using the join operation we learned earlier, try your hands at computing the common words shared by Obama's and Schiliro's articles. Sort the common words by their TF-IDF weights in Obama's document. End of explanation """ common_words = set(common_tf_idf['word'][0:5]) # YOUR CODE HERE def has_top_words(word_count_vector): unique_words = set(word_count_vector.keys()) return common_words <= unique_words # YOUR CODE HERE wiki['has_top_words'] = wiki['word_count'].apply(has_top_words) # use has_top_words column to answer the quiz question wiki['has_top_words'].sum() # YOUR CODE HERE """ Explanation: The first 10 words should say: Obama, law, democratic, Senate, presidential, president, policy, states, office, 2011. Quiz Question. Among the words that appear in both Barack Obama and Phil Schiliro, take the 5 that have largest weights in Obama. How many of the articles in the Wikipedia dataset contain all of those 5 words? End of explanation """ obama_tfidf = wiki[wiki['name']=='Barack Obama']['tf_idf'][0] bush_tfidf = wiki[wiki['name']=='George W. Bush']['tf_idf'][0] biden_tfidf = wiki[wiki['name']=='Joe Biden']['tf_idf'][0] from graphlab.toolkits.distances import euclidean print euclidean(obama_tfidf, biden_tfidf) print euclidean(bush_tfidf, biden_tfidf) print euclidean(obama_tfidf, bush_tfidf) """ Explanation: Notice the huge difference in this calculation using TF-IDF scores instead of raw word counts. We've eliminated noise arising from extremely common words. Choosing metrics You may wonder why Joe Biden, Obama's running mate in two presidential elections, is missing from the query results of model_tf_idf. Let's find out why. First, compute the distance between TF-IDF features of Obama and Biden. Quiz Question. Compute the Euclidean distance between TF-IDF features of Obama and Biden. Hint: When using Boolean filter in SFrame/SArray, take the index 0 to access the first match. End of explanation """ model_tf_idf.query(wiki[wiki['name'] == 'Barack Obama'], label='name', k=10) """ Explanation: The distance is larger than the distances we found for the 10 nearest neighbors, which we repeat here for readability: End of explanation """ def compute_length(row): return len(row['text'].split(' ')) wiki['length'] = wiki.apply(compute_length) nearest_neighbors_euclidean = model_tf_idf.query(wiki[wiki['name'] == 'Barack Obama'], label='name', k=100) nearest_neighbors_euclidean = nearest_neighbors_euclidean.join(wiki[['name', 'length']], on={'reference_label':'name'}) nearest_neighbors_euclidean.sort('rank') """ Explanation: But one may wonder, is Biden's article that different from Obama's, more so than, say, Schiliro's? It turns out that, when we compute nearest neighbors using the Euclidean distances, we unwittingly favor short articles over long ones. Let us compute the length of each Wikipedia document, and examine the document lengths for the 100 nearest neighbors to Obama's page. 
End of explanation """ plt.figure(figsize=(10.5,4.5)) plt.hist(wiki['length'], 50, color='k', edgecolor='None', histtype='stepfilled', normed=True, label='Entire Wikipedia', zorder=3, alpha=0.8) plt.hist(nearest_neighbors_euclidean['length'], 50, color='r', edgecolor='None', histtype='stepfilled', normed=True, label='100 NNs of Obama (Euclidean)', zorder=10, alpha=0.8) plt.axvline(x=wiki['length'][wiki['name'] == 'Barack Obama'][0], color='k', linestyle='--', linewidth=4, label='Length of Barack Obama', zorder=2) plt.axvline(x=wiki['length'][wiki['name'] == 'Joe Biden'][0], color='g', linestyle='--', linewidth=4, label='Length of Joe Biden', zorder=1) plt.axis([0, 1000, 0, 0.04]) plt.legend(loc='best', prop={'size':15}) plt.title('Distribution of document length') plt.xlabel('# of words') plt.ylabel('Percentage') plt.rcParams.update({'font.size':16}) plt.tight_layout() """ Explanation: To see how these document lengths compare to the lengths of other documents in the corpus, let's make a histogram of the document lengths of Obama's 100 nearest neighbors and compare to a histogram of document lengths for all documents. End of explanation """ model2_tf_idf = graphlab.nearest_neighbors.create(wiki, label='name', features=['tf_idf'], method='brute_force', distance='cosine') nearest_neighbors_cosine = model2_tf_idf.query(wiki[wiki['name'] == 'Barack Obama'], label='name', k=100) nearest_neighbors_cosine = nearest_neighbors_cosine.join(wiki[['name', 'length']], on={'reference_label':'name'}) nearest_neighbors_cosine.sort('rank') """ Explanation: Relative to the rest of Wikipedia, nearest neighbors of Obama are overwhemingly short, most of them being shorter than 300 words. The bias towards short articles is not appropriate in this application as there is really no reason to favor short articles over long articles (they are all Wikipedia articles, after all). Many of the Wikipedia articles are 300 words or more, and both Obama and Biden are over 300 words long. Note: For the interest of computation time, the dataset given here contains excerpts of the articles rather than full text. For instance, the actual Wikipedia article about Obama is around 25000 words. Do not be surprised by the low numbers shown in the histogram. Note: Both word-count features and TF-IDF are proportional to word frequencies. While TF-IDF penalizes very common words, longer articles tend to have longer TF-IDF vectors simply because they have more words in them. To remove this bias, we turn to cosine distances: $$ d(\mathbf{x},\mathbf{y}) = 1 - \frac{\mathbf{x}^T\mathbf{y}}{\|\mathbf{x}\| \|\mathbf{y}\|} $$ Cosine distances let us compare word distributions of two articles of varying lengths. Let us train a new nearest neighbor model, this time with cosine distances. We then repeat the search for Obama's 100 nearest neighbors. 
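As a quick aside, the cosine distance in the formula above can be written for two plain numpy vectors as:
def cosine_distance(x, y):
    # 1 - cos(angle between x and y)
    return 1 - np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))
Scaling either vector by a positive constant leaves this value unchanged, which is exactly why document length drops out of the comparison.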
End of explanation """ plt.figure(figsize=(10.5,4.5)) plt.figure(figsize=(10.5,4.5)) plt.hist(wiki['length'], 50, color='k', edgecolor='None', histtype='stepfilled', normed=True, label='Entire Wikipedia', zorder=3, alpha=0.8) plt.hist(nearest_neighbors_euclidean['length'], 50, color='r', edgecolor='None', histtype='stepfilled', normed=True, label='100 NNs of Obama (Euclidean)', zorder=10, alpha=0.8) plt.hist(nearest_neighbors_cosine['length'], 50, color='b', edgecolor='None', histtype='stepfilled', normed=True, label='100 NNs of Obama (cosine)', zorder=11, alpha=0.8) plt.axvline(x=wiki['length'][wiki['name'] == 'Barack Obama'][0], color='k', linestyle='--', linewidth=4, label='Length of Barack Obama', zorder=2) plt.axvline(x=wiki['length'][wiki['name'] == 'Joe Biden'][0], color='g', linestyle='--', linewidth=4, label='Length of Joe Biden', zorder=1) plt.axis([0, 1000, 0, 0.04]) plt.legend(loc='best', prop={'size':15}) plt.title('Distribution of document length') plt.xlabel('# of words') plt.ylabel('Percentage') plt.rcParams.update({'font.size': 16}) plt.tight_layout() """ Explanation: From a glance at the above table, things look better. For example, we now see Joe Biden as Barack Obama's nearest neighbor! We also see Hillary Clinton on the list. This list looks even more plausible as nearest neighbors of Barack Obama. Let's make a plot to better visualize the effect of having used cosine distance in place of Euclidean on our TF-IDF vectors. End of explanation """ sf = graphlab.SFrame({'text': ['democratic governments control law in response to popular act']}) sf['word_count'] = graphlab.text_analytics.count_words(sf['text']) encoder = graphlab.feature_engineering.TFIDF(features=['word_count'], output_column_prefix='tf_idf') encoder.fit(wiki) sf = encoder.transform(sf) sf """ Explanation: Indeed, the 100 nearest neighbors using cosine distance provide a sampling across the range of document lengths, rather than just short articles like Euclidean distance provided. Moral of the story: In deciding the features and distance measures, check if they produce results that make sense for your particular application. Problem with cosine distances: tweets vs. long articles Happily ever after? Not so fast. Cosine distances ignore all document lengths, which may be great in certain situations but not in others. For instance, consider the following (admittedly contrived) example. +--------------------------------------------------------+ | +--------+ | | One that shall not be named | Follow | | | @username +--------+ | | | | Democratic governments control law in response to | | popular act. | | | | 8:05 AM - 16 May 2016 | | | | Reply Retweet (1,332) Like (300) | | | +--------------------------------------------------------+ How similar is this tweet to Barack Obama's Wikipedia article? Let's transform the tweet into TF-IDF features, using an encoder fit to the Wikipedia dataset. (That is, let's treat this tweet as an article in our Wikipedia dataset and see what happens.) End of explanation """ tweet_tf_idf = sf[0]['tf_idf.word_count'] tweet_tf_idf obama = wiki[wiki['name'] == 'Barack Obama'] obama """ Explanation: Let's look at the TF-IDF vectors for this tweet and for Barack Obama's Wikipedia entry, just to visually see their differences. 
End of explanation """ obama_tf_idf = obama[0]['tf_idf'] graphlab.toolkits.distances.cosine(obama_tf_idf, tweet_tf_idf) """ Explanation: Now, compute the cosine distance between the Barack Obama article and this tweet: End of explanation """ model2_tf_idf.query(obama, label='name', k=10) """ Explanation: Let's compare this distance to the distance between the Barack Obama article and all of its Wikipedia 10 nearest neighbors: End of explanation """
saketkc/hatex
2015_Fall/MATH-578B/Homework3/Homework3.ipynb
mit
### Simulation %matplotlib inline from __future__ import division import numpy as np import matplotlib.pyplot as plt np.random.seed(1) import math N=1000 s=0 def R(x,y): return math.sqrt(x*x+y*y) for i in range(N): r=-100 y=0 x=0 while R(x,y)>r: S=np.random.uniform(size=2) x=S[0] y=S[1] r=np.random.exponential(1) s+=r print 'Average radius: {}'.format(s/N) """ Explanation: Problem 1 Given: $\Lambda$ is a poisson process on $\mathbb{R}$. Each point $x_i$ in $\Lambda$ underoges some markging process' resulting in $N={(x_1,U_1), (x_2,U_2) \dots (x_n, U_n)}$ where $U_i$ are iid generated from some process witg density $\mu$ This essentially is a generalisation of the Colouring theorem. Let the joint 'marked' distribution $\Lambda'$ be denoted by: $f(x,y)$ where $x\ \in \ \Lambda$ and $y\ \in U$ and $(x,y) \in \Lambda'$ which is in $\mathbb{R}^2$ Consider $F = \sum_{x_i\ \in\ \Lambda}f(x_i,y_i)$ Fact 1: $\Lambda' = {(X,U)|X\in \Lambda}$ is an independent process. (This is only true conditional on $X$) Thus, $E[e^{-sF}|X \in \Lambda] = \Pi_{X_i \in \Lambda} f(X_i, U_i) = \Pi_{X_i \in \Lambda}\int_{U}e^{-sf(X_i,U_i)}p(X_i,U_i)dU_i$ Fact 2: In any region $\Lambda_i$, the number of points $N(\Lambda_i) \sim Poisson(\mu_i)$ where $\mu_i = \int_{\Lambda_i} \mu(x)dx$ and hence, consider: $F = \sum_{i=1}^k f(X_i) = \sum_{i=1}f_iN_i^k$ being $k$ disjoint sets where $f$ is any measurable function. then $$ \begin{align} E[e^{sF}] &= \Pi_{i=1}^k E[e^{sf_iN_i}]\ &= \Pi(e^{\lambda_i(e^{fs}-1)})\ \text{where} \lambda_i = \int_{A_i}\lambda(x)dx\ &= e^{\sum_{i=1}^k \int(e^{fs}-1)\lambda(x)dx}\ &= e^{\sum_{i=1}^k \int(e^{fs}-1)\lambda(x)dx}\ &= e^{ \int_{\Lambda}(e^{fs}-1)\lambda(x)dx}\ \end{align} $$ or $E[e^{-sF}] = exp(-\int_{\Lambda}(1-e^{fs})\lambda(x)dx)$ Now, consider $E[e^{-sF'}]$ where $F'$ is the sum of independent random variable $f(X_i, U_i)$ Conditional on $\Lambda$: $E[e^{-sF'}|\Lambda] = \Pi_{X_i\in \Lambda}E[e^{-sf(X_i,U_i)}]$ Consider the measurable function $f'(x) = -log(\int_U e^{-f(X,U)}p(X,U)du)$, then: [Couldn't take it further from here, but the idea should be to define $F^* = \sum f(X,U)$. I was not able to come up with a generating function for this. The idea was to show that it has generating function that for a poisson] Aliter [Source Grimmett and Stirzker, 3rd Edition 6.13.5] Consider $f: R \longrightarrow R^2$ denoting the distribution of points in $\Lambda'$ then for any set $B\subset R^2$, the number of points of f(\Lambda') in B is given by $N_B = |\Lambda' \cup f^{-1}B| \sim Poisson(B)$ Given project disjoint sets $B_i$ in $R^2$, there pre-images in $R$ are also disjoint and $\Lambda(A) \sim Poisson(\int_A \lambda(x)dx)$ $\Lambda'(B) \sim \Lambda(f^{-1}B) = Poisson(\int_{f^{-1}B}\lambda(x)dx) = Poisson(\int_B \lambda(x)dx\mu(y)dy$ Problem 2 Given Raindrops fall as a $PPP(\lambda drops/cm^2)$ on $R^2 \times [0, \infty)$ and each drop scatters as in a radius of circle $r \sim exp(1/cm)$. To find: Probability density of first drop touching the origin. 
$\lambda=1$ Define $U_i$ to be iid $Bernoulli(p(x_k,y_k)$ given by: $$ U_k = \begin{cases} 1 & if \sqrt{x_k^2+y_k^2} \leq r_k\ 0 & otherwise \end{cases} $$ By coloring theorem $\implies$ $\Lambda'={(r_k,x_k,y_k}\sim PPP(\lambda p(x_k,y_k))$ For the drop to splash the origin with radius '$r$': Consider $\Lambda^1 = {(t_i,x_i,y_i,r_i) \in \Lambda: U_k=1}$ $\tau = min{t_k: (t_k,x_k,y_k,r_k) \in \Lambda'}$ Consider $P(t < \tau, R<r)$ = P[no points in $[0,r] \times [0, t] \times R^2$] = $P[\Lambda^1([0,r] \times [0, t] \times R^2)=0]$ $P[\Lambda^1([0,r] \times [0, 1] \times R^2)=0]=exp(-\int_{0}^r \int_{0}^{1} \int_{R^2}\lambda p(x,y)dxdydtdr) =exp(-\lambda 2\pi tr)$ Now, $P(R<r)=\int P(t,R)dt = exp(-2\pi\lambda r)$ Thus, $R \sim exponential({2\pi\lambda})$ $ER=\int_0^{\infty} re^{-2\pi\lambda r}dr=\frac{1}{2\pi} = 0.15$ End of explanation """ k_a=2e-6 k_b=2e-6 k_p=5e-6 k_d=1e-5 ll = 1e6 P = np.matrix([[1-k_a-k_b, k_a ,k_b, 0, 0, 0], [k_a, 1-k_a-k_b, 0, k_b, 0, 0], [k_b, 0, 1-k_a-k_b, k_a, 0, 0], [0, k_b, k_a, 1-k_a-k_b-k_p, k_p, 0], [0, 0, 0, 0, 1-k_d, k_d], [0, 0, 0, k_d, 0, 1-k_d]], dtype=np.float64) Q = ll*(P-np.eye(6)) print(Q) Qd= Q[:-1,:-1] Qi = np.linalg.pinv(Qd) u=(np.sum(Qi, axis=1)*-1) u=u.tolist() def h(x): s=0 ht=0 cc=0 for i in range(1,10000): new_state=x while new_state!=5: old_state=new_state probs = Q[old_state,:]/-Q[old_state,old_state] probs=probs.tolist()[0] probs[old_state]=0 qaa = np.random.exponential(-1/Q[old_state,old_state]) z=np.random.choice(6, 1, p=probs) new_state = z[0] #states[z[0]] s+=qaa return s/10000 """ Explanation: The simulation results do not seem to be close to the expected results of 0.15 Problem 3 Part (a) In order to simulate the continuous time MC, we make use of the instantaneous rate matrix $Q$ given by: $Q=\lambda(P-I)$ where $\lambda=10^6$ The coninuous time MC is approximated to occur in discreted time steps of $10^{-6}$ seconds $Q_{aa}$ = Total jump rate out of state a $Q_{ab}$ = Jum rate from $a \longrightarrow b$ Due to the original transition matrix $P$ having extremely small entries, most of the time is spent in the same state. By approximating the holding time to be poisson, we arrive at the $(e^{tQ})_{ab}$ approximation for $P(Y_t=b|Y_0=a)$ Part (b) $\tau_\dagger = inf{t \geq 0: Y_t=\dagger}$ i.e $\tag_\dagger$ is the hitting time and $u(a) = E[\tau_\dagger|Y_0=a]$ defines the mean hitting time. $u(\dagger)=0$ For $a \neq \dagger$ $u(a) = \text{Hold time in state a} + \sum_b \text{(fractional jump rate from $a$ to $b$)} \times u(b)$ Alternatively: $u(a) = \frac{1}{-Q_{aa}} + \sum_{b \neq a}(\frac{Q_{ab}}{-Q_{aa}})u(b)$ $\implies$ $-Q_{aa}u(a) = 1 + \sum_{b \neq a} Q_{ab}u(b)$ We thus solve for: $Q\vec{u}=-\vec{1}$ End of explanation """ print('From calculation: {}\t From Simulation: {}'.format(u[0][0],h(0))) """ Explanation: Starting state $\phi$ End of explanation """ print('From calculation: {}\t From Simulation: {}'.format(u[1][0],h(1))) """ Explanation: Starting state $\alpha$ End of explanation """ print('From calculation: {}\t From Simulation: {}'.format(u[2][0],h(2))) """ Explanation: Starting state $\beta$ End of explanation """ print('From calculation: {}\t From Simulation: {}'.format(u[3][0],h(3))) """ Explanation: Starting state $\alpha+\beta$ End of explanation """ print('From calculation: {}\t From Simulation: {}'.format(u[4][0],h(4))) """ Explanation: Starting state $pol$ End of explanation """
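# Sanity check (a sketch): the vector u computed above should satisfy the
# linear system Q_d u = -1 derived in Part (b), up to numerical error of the
# pseudo-inverse.
print(np.allclose(Qd * np.matrix(u), -np.ones((5, 1))))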
erickpeirson/statistical-computing
Markov Chain Monte Carlo.ipynb
cc0-1.0
%matplotlib inline import random import math from matplotlib import pyplot as plt import numpy as np import pandas as pd from scipy.stats import norm, uniform, multivariate_normal """ Explanation: Chapter 1 - Markov Chain Monte Carlo End of explanation """ U = [random.random() for i in xrange(10000)] """ Explanation: 1.1 Generating $U$ ~ $Uniform(0, 1)$ End of explanation """ qexp = lambda p, l: -1.*math.log(1.-p)/l X = [qexp(u, 0.5) for u in U] plt.figure(figsize=(10,5)) plt.subplot(121) plt.hist(U) plt.title('Histogram of U') plt.xlabel('U') plt.ylabel('Frequency') plt.subplot(122) plt.hist(X) plt.title('Histogram of X') plt.xlabel('X') plt.ylabel('Frequency') plt.tight_layout() plt.show() """ Explanation: 1.2 Inverse CDF Method Inverse CDF for $Exp(\lambda)$ is: $ F^{-1}(p;\lambda) = \frac{-ln(1-p)}{\lambda} $ End of explanation """ df = lambda x: 0.7*norm.pdf(x, loc=2, scale=1) + 0.3*norm.pdf(x, loc=5, scale=1) # Some PDF. density_data = zip(*[(x, df(x)) for x in np.arange(-3, 12, 15./200)]) plt.plot(*density_data) plt.ylabel('f(x)') plt.xlabel('x') plt.show() """ Explanation: 1.3 Accept/Reject Algorithm Want to sample randomly from a distribution over (say) $X = (0., 1.)$. $f(x)$ is bounded by some M maximum. Generate $x_i$ ~ $Unif(0, 1)$ Generate $y_i = u_iM$ where $u_i$ ~ $Unif(0, 1)$ Reject points above $f(x)$ (density curve). Accept dots below. if $y_i <= f(x_i)$ then accept $x_i$. if $u_i <= \frac{f(x_i)}{M}$ then accept else reject Pros: * Simple Cons * Have to find the maximum (hard) * Inefficient (low acceptance rate) Alternative (better): * Bound the distribution more closely * Still inefficient, still need maximum 1.3.1 MCMC algorithm Markov-Chain Monte Carlo: * Monto Carlo: random accept/reject aspect. * Develop a chain of values, $x_0, x_1, ... x_i$ * To generate next step $x_{i+1}$, only consider $x_i$. * Markov property is that I only consider the current state. Suppose we have some distribution, and a starting value from that distribution $x_0$. To generate $x_1$: * Given $x_i$, propose some nearby value: $x_{i+1}^ = x_i + noise$. * If $f(x_{i+1}^ > f(x_i)$, then accept $x_{i+1} = x_{i+1}^$. (generalized by...) * If $f(x_{i+1}^ <= f(x_i)$, then sometimes accept... * Accept if: $u_{i+1}^ < \frac{f(x_{i+1}^)}{f(x_i)}$, where $u_{i+1}^*$ ~ $Unif(0, 1)$ End of explanation """ rproposal = lambda x_i: x_i + np.random.uniform(-2., 2.) x = [3] # Arbitrary starting point. x_star = [3] x_star.append(rproposal(x[0])) # Propose the next value. if (df(x_star[1]) / df(x[0])) > np.random.uniform(): x.append(x_star[1]) # Accept. else: x.append(x[0]) # Reject. x def eval(x_prev, x_prop): if (df(x_prop) / df(x_prev)) > np.random.uniform(): return x_prop return x_prev x, x_star = [3], [3] for i in xrange(10): x_star.append(rproposal(x[-1])) x.append(eval(x[-1], x_star[-1])) plt.subplot(211) plt.plot(*density_data) plt.xlim(-4, 12) plt.ylabel('f(x)') plt.subplot(212) plt.plot(x, range(len(x))) plt.scatter(x, range(len(x))) rejected = lambda x_a, x_p: 'green' if x_a == x_p else 'blue' plt.scatter(x_star, range(len(x)), c=[rejected(x[i], x_star[i]) for i in range(len(x))]) plt.xlim(-4, 12) plt.ylim(0, len(x)) plt.xlabel('x') plt.ylabel('Iteration') plt.tight_layout() plt.show() """ Explanation: Generate a random proposal value within (say) +/- 2. 
End of explanation """ g_unif = lambda x_u, y_u: uniform.pdf(x_u)*uniform.pdf(y_u)/uniform.pdf(x_u) g_norm = lambda x_u, y_u: norm.pdf(x_u)*norm.pdf(y_u)/norm.pdf(x_u) def MCMC(df, start, rprop, dprop=None, N=1000): """ Perform Markov-Chain Monte Carlo simulation. Parameters ---------- df : callable Target distribution. start : float Starting value. rprop : callable Proposal function. dprop : callable (proposed, current) (default: None) Gives the density of the proposal function centered on the current value. N : int Number of iterations Returns ------- chain : list Markov chain of size N. """ if dprop is None: # Treat as symmetric. dprop = lambda to, fr: 1. chain = [start] for i in xrange(N): x_star = rprop(chain[-1]) r1 = df(x_star) / df(chain[-1]) r2 = dprop(chain[-1], x_star) / dprop(x_star, chain[-1]) if r1*r2 > np.random.uniform(): chain.append(x_star) else: chain.append(chain[-1]) return chain def trace_plot(chain): plt.plot(chain) plt.ylabel('Value') plt.xlabel('Iteration') plt.show() chain = MCMC(df, 2, rproposal, N=1000) trace_plot(chain) chain += MCMC(df, chain[-1], rproposal, N=10000) plt.hist(chain, bins=20) plt.title('Histogram of chain') plt.xlabel('chain') plt.ylabel('Frequency') ax = plt.gca() # Plot the target density function. ax2 = ax.twinx() ax2.plot(*density_data, c='orange', lw=3) plt.show() """ Explanation: If the proposal is generated in a non-uniform fashion, then the accept/reject rule must change. Accept iff: $u_{i+1} <= \frac{f(x_{i+1}^)}{f(x_i)}\frac{g(x_i| x_{i+1}^)}{g(x_{i+1}^*| x_i)}$ $g(x_{i+1}^*|x_i)$ is the density of the proposal distribution, centered at $x_i$. End of explanation """ chain = MCMC(df, 2, rproposal, g_norm, N=1000) trace_plot(chain) """ Explanation: Pros (compared to bounded scatter method): * More efficient. * No need to find maximum value. Cons: * Still need to decide on a good proposal distribution. * Need a starting value. With asymmetric proposal distribution End of explanation """ p_small = lambda x: x + np.random.uniform(-0.1, 0.1) chain = MCMC(df, 2, p_small, N=1000) trace_plot(chain) p_large = lambda x: x + np.random.uniform(-30., +30.) chain = MCMC(df, 2, p_large, N=1000) trace_plot(chain) """ Explanation: 1.3.2 Common problems End of explanation """ chain = MCMC(df, 40, rproposal, N=1000) trace_plot(chain) def trace_plots(chains): for chain in chains: plt.plot(chain) plt.ylabel('Value') plt.xlabel('Iteration') plt.show() chain1 = MCMC(df, -30., rproposal, N=1000) chain2 = MCMC(df, 0., rproposal, N=1000) chain3 = MCMC(df, 30., rproposal, N=1000) trace_plots([chain1, chain2, chain3]) """ Explanation: Burn-in End of explanation """ p_small = lambda x: x + np.random.uniform(-0.1, 0.1) chain1 = MCMC(df, 2, p_small, N=1000) chain2 = MCMC(df, 2, p_small, N=1000) chain3 = MCMC(df, 2, p_small, N=1000) trace_plots([chain1, chain2, chain3]) """ Explanation: Assessing Convergence Visually, it is clear that chains converge around iteration 100. But we would really like a metric for convergence, mixing. 
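One crude first check (just a sketch, not the formal statistic developed next) is to compare the chains' sample means and variances directly:
print([np.mean(c) for c in (chain1, chain2, chain3)])
print([np.var(c) for c in (chain1, chain2, chain3)])
If these numbers disagree badly across chains, the chains clearly have not mixed; the Gelman-Rubin statistic below makes this comparison precise.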
End of explanation """ def Gelman(chains): if len(chains.shape) == 3: N_p = chains.shape[2] else: N_p = 1 generate = lambda ptn: np.array([np.array([np.array([ptn(p, i, c) for p in xrange(N_p) for i in xrange(chains.shape[1])]) for c in xrange(chains.shape[0])])]) params = generate(lambda p, i, c: 'x{0}'.format(p)) iters = generate(lambda p, i, c: i) labels = generate(lambda p, i, c: c) data = zip(chains.flat, params.flat, iters.flat, labels.flat) dataframe = pd.DataFrame(data, columns=('Value', 'Parameter', 'Iteration', 'Chain')) xbar = dataframe.groupby('Parameter').Value.mean() m = chains.shape[0] xbar_i = dataframe.groupby(('Parameter', 'Chain')).Value.mean() s2_i = dataframe.groupby(('Parameter', 'Chain')).Value.var() n = dataframe.groupby(('Parameter', 'Chain')).Value.count().mean() W = s2_i.mean() B = (n/(m-1.)) * ((xbar_i - xbar)**2).sum() sigma2_hat = W*(n-1.)/n + B/n R_hat = np.sqrt(sigma2_hat/W) n_eff = m*n*sigma2_hat/B # I missed what this was for. return R_hat, n_eff p_small = lambda x: x + np.random.uniform(-.1, .1) chain1 = np.array(MCMC(df, 3, p_small, N=1000)) chain2 = np.array(MCMC(df, 4, p_small, N=1000)) chain3 = np.array(MCMC(df, 5, p_small, N=1000)) chain4 = np.array(MCMC(df, 2, p_small, N=1000)) trace_plots([chain1, chain2, chain3, chain4]) Gelman(np.array([chain1, chain2, chain3, chain4])) p_small = lambda x: x + np.random.uniform(-.6, .6) chain1 = MCMC(df, 3, p_small, N=10000) chain2 = MCMC(df, 4, p_small, N=10000) chain3 = MCMC(df, 5, p_small, N=10000) chain4 = MCMC(df, 2, p_small, N=10000) trace_plots([chain1, chain2, chain3, chain4]) Gelman(np.array([chain1, chain2, chain3, chain4])) """ Explanation: Should have ~ same means, and ~ same variance. Use an ANOVA. Let $\bar{x_i}$ be the mean of chain $i$, and $s_i^2$ be the variance of chain $i$. At convergence, $\bar{x_1} = \bar{x_2} = \bar{x_3}... = \mu$ and $s_1^2 = s_2^2 = s_3^2... = \sigma^2$. Two ways to approximate $\sigma^2$ $W = \frac{1}{m}\sum^m_{i=1}{s^2_i}$, $m=$no. of chains. at convergence, $\bar{x_i}$ ~ $N(\mu, \frac{\sigma^2}{n})$, $n=$no. of chain steps. "Central limit theorem" Implies: $n\bar{x_i}$ ~ $N(\mu, \sigma^2)$ So the variance($n\bar{x_i}$) values estimates $\sigma^2$ $B = \frac{n}{m-1}\sum{(\bar{x_i} - \bar{x..})}^2$ $\bar{x..}$ is the mean of all values If the chains haven't converged, then $B >> \sigma^2$, and $W < \sigma^2$ (within-chain variance is too small). define $\hat{\sigma}^2 = \frac{n-1}{n}W + \frac{1}{n}B$ $\hat{R} = \sqrt{\frac{\hat{\sigma}^2}{W}}$ -- scale reduction factor. $\hat{R} > 1$. This gives an idea of how much more variance could increase if we let chains continue to wander in the space. If space is not well-explored, $\hat{R}$ is really big. As $\hat{R}$ approaches 1, we have better explored the space (and chains are agreeing). $1 <= \hat{R} < 1.05$ is considered "close enough." But trust the trace-plots over $\hat{R}$. Big idea: Do chains have the same mean (ANOVA)? $\hat{R}$ is the Gelman + Rubin (1992) statistic. End of explanation """ def mMCMC(df, start, rprop, dprop=None, N=1000, num_chains=4): """ Perform Markov-Chain Monte Carlo simulation with multiple chains.. Parameters ---------- df : callable Target distribution. start : float Starting value. rprop : callable Proposal function. dprop : callable (proposed, current) (default: None) Gives the density of the proposal function centered on the current value. N : int Number of iterations num_chains : int Number of chains. Returns ------- chains : numpy.array Shape (``num_chains``, ``N``, num. 
parameters) """ if dprop is None: # Treat as symmetric. dprop = lambda to, fr: 1. if not hasattr(start, '__iter__'): start = np.array([np.array(start) for i in xrange(num_chains)]) chains = [] for j in xrange(num_chains): chain = [start[j]] for i in xrange(N): x_star = rprop(chain[-1]) r1 = df(x_star) / df(chain[-1]) r2 = dprop(chain[-1], x_star) / dprop(x_star, chain[-1]) if r1*r2 > np.random.uniform(): chain.append(x_star) else: chain.append(chain[-1]) chains.append(np.array(chain)) return np.array(chains) chains = mMCMC(df, [3,4,1,2], p_small, N=1000, num_chains=4) trace_plots(chains) Gelman(chains) """ Explanation: Multiple MCMC chains, with support for multi-variate distributions mMCMC -- takes a list of starting points and runs all chains End of explanation """ dtarget = lambda x: multivariate_normal.pdf(x, mean=(3, 10), cov=[[3, 3], [3, 7]]) rprop = lambda x: multivariate_normal.rvs(mean=x) chains = mMCMC(dtarget, [[0, 0]], rprop, N=50, num_chains=1) x1 = np.linspace(-6, 12, 101) x2 = np.linspace(-11, 31, 101) X, Y = np.meshgrid(x1, x2) Z = np.array(map(dtarget, zip(X.flat, Y.flat))).reshape(101, 101) plt.figure(figsize=(10,7)) plt.contour(X, Y, Z) plt.plot(chains[0][:, 0], chains[0][:, 1], lw=2, alpha=0.5) plt.xlim(-2, 8) plt.ylim(0, 18) plt.xlabel('x') plt.ylabel('y') plt.show() chains = mMCMC(dtarget, [[0, 0], [8, 18]], rprop, N=1000, num_chains=2) plt.figure(figsize=(10,7)) plt.contour(X, Y, Z) plt.plot(chains[0][:, 0], chains[0][:, 1], alpha=0.3) plt.plot(chains[1][:, 0], chains[1][:, 1], alpha=0.3) plt.xlim(-2, 8) plt.ylim(0, 18) plt.xlabel('x') plt.ylabel('y') plt.show() Gelman(chains) """ Explanation: Multi-variate example End of explanation """
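# A follow-up worth trying (a sketch): discard an initial burn-in window before
# computing the Gelman-Rubin statistic, since the earliest samples mostly
# reflect the arbitrary starting points rather than the target distribution.
burned = chains[:, 200:, :]   # keep iterations 200 onward (cut-off is arbitrary)
Gelman(burned)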
mne-tools/mne-tools.github.io
0.17/_downloads/f79b821209d128d6d63d736e8cc0beb3/plot_fdr_stats_evoked.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # # License: BSD (3-clause) import numpy as np from scipy import stats import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.stats import bonferroni_correction, fdr_correction print(__doc__) """ Explanation: FDR correction on T-test on sensor data One tests if the evoked response significantly deviates from 0. Multiple comparison problem is addressed with False Discovery Rate (FDR) correction. End of explanation """ data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' event_id, tmin, tmax = 1, -0.2, 0.5 # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname)[:30] channel = 'MEG 1332' # include only this channel in analysis include = [channel] """ Explanation: Set parameters End of explanation """ picks = mne.pick_types(raw.info, meg=False, eog=True, include=include, exclude='bads') event_id = 1 reject = dict(grad=4000e-13, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=reject) X = epochs.get_data() # as 3D matrix X = X[:, 0, :] # take only one channel to get a 2D array """ Explanation: Read epochs for the channel of interest End of explanation """ T, pval = stats.ttest_1samp(X, 0) alpha = 0.05 n_samples, n_tests = X.shape threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1) reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha) threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1) reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep') threshold_fdr = np.min(np.abs(T)[reject_fdr]) """ Explanation: Compute statistic End of explanation """ times = 1e3 * epochs.times plt.close('all') plt.plot(times, T, 'k', label='T-stat') xmin, xmax = plt.xlim() plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k', label='p=0.05 (uncorrected)', linewidth=2) plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r', label='p=0.05 (Bonferroni)', linewidth=2) plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b', label='p=0.05 (FDR)', linewidth=2) plt.legend() plt.xlabel("Time (ms)") plt.ylabel("T-stat") plt.show() """ Explanation: Plot End of explanation """
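# A quick numeric summary to complement the plot above (a sketch; the exact
# counts depend on the sample dataset downloaded earlier):
n_uncorrected = (pval < alpha).sum()
n_bonferroni = reject_bonferroni.sum()
n_fdr = reject_fdr.sum()
print("significant time points -> uncorrected: %d, Bonferroni: %d, FDR: %d"
      % (n_uncorrected, n_bonferroni, n_fdr))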
ocefpaf/folium
examples/plugin-Search.ipynb
mit
import geopandas

states = geopandas.read_file(
    "https://rawcdn.githack.com/PublicaMundi/MappingAPI/main/data/geojson/us-states.json",
    driver="GeoJSON",
)

cities = geopandas.read_file(
    "https://d2ad6b4ur7yvpq.cloudfront.net/naturalearth-3.3.0/ne_50m_populated_places_simple.geojson",
    driver="GeoJSON",
)
"""
Explanation: Let's get some JSON data from the web - both a point layer and a polygon GeoJson dataset with some population data.
End of explanation
"""

states.describe()
"""
Explanation: And take a look at what our data looks like:
End of explanation
"""

states_sorted = states.sort_values(by="density", ascending=False)

states_sorted.head(5).append(states_sorted.tail(5))[["name", "density"]]
"""
Explanation: Look how far the minimum and maximum values for the density are from the top and bottom quartile breakpoints! We have some outliers in our data that are well outside the meat of most of the distribution. Let's look into this to find the culprits within the sample.
End of explanation
"""

def rd2(x):
    return round(x, 2)

minimum, maximum = states["density"].quantile([0.05, 0.95]).apply(rd2)

mean = round(states["density"].mean(), 2)

print(f"minimum: {minimum}", f"maximum: {maximum}", f"Mean: {mean}", sep="\n\n")
"""
Explanation: Looks like Washington D.C. and Alaska were the culprits on each end of the range. Washington D.C. was further from the next most dense state, New Jersey, than the least dense state, Alaska, was from Wyoming, however. Washington D.C. has a relatively small land area for the number of people that live there, so it makes sense that it's pretty dense. And Alaska has a lot of land area, but not much of it is habitable for humans.
<br><br>
However, we're looking at all of the states in the US to look at things on a more regional level. That high figure at the top of our range for Washington D.C. will really hinder our ability to differentiate between the other states, so let's account for that in the min and max values for our color scale, by getting the quantile values close to the end of the range. Anything higher or lower than those values will just fall into the 'highest' and 'lowest' bins for coloring.
End of explanation
"""

import branca

colormap = branca.colormap.LinearColormap(
    colors=["#f2f0f7", "#cbc9e2", "#9e9ac8", "#756bb1", "#54278f"],
    index=states["density"].quantile([0.2, 0.4, 0.6, 0.8]),
    vmin=minimum,
    vmax=maximum,
)

colormap.caption = "Population Density in the United States"

colormap
"""
Explanation: This looks better. Our min and max values for the colorscale are much closer to the mean value now. Let's run with these values, and make a colorscale. I'm just going to use a sequential light-to-dark color palette from the ColorBrewer.
End of explanation
"""

us_cities = geopandas.sjoin(cities, states, how="inner", op="within")

pop_ranked_cities = us_cities.sort_values(by="pop_max", ascending=False)[
    ["nameascii", "pop_max", "geometry"]
].iloc[:20]
"""
Explanation: Let's narrow these cities down to United States cities, by using GeoPandas' spatial join functionality between two GeoDataFrame objects, with the Point 'within' Polygon predicate.
End of explanation
"""

pop_ranked_cities.head(5)
"""
Explanation: Ok, now we have a new GeoDataFrame with our top 20 populated cities. Let's see the top 5. 
End of explanation """ import folium from folium.plugins import Search m = folium.Map(location=[38, -97], zoom_start=4) def style_function(x): return { "fillColor": colormap(x["properties"]["density"]), "color": "black", "weight": 2, "fillOpacity": 0.5, } stategeo = folium.GeoJson( states, name="US States", style_function=style_function, tooltip=folium.GeoJsonTooltip( fields=["name", "density"], aliases=["State", "Density"], localize=True ), ).add_to(m) citygeo = folium.GeoJson( pop_ranked_cities, name="US Cities", tooltip=folium.GeoJsonTooltip( fields=["nameascii", "pop_max"], aliases=["", "Population Max"], localize=True ), ).add_to(m) statesearch = Search( layer=stategeo, geom_type="Polygon", placeholder="Search for a US State", collapsed=False, search_label="name", weight=3, ).add_to(m) citysearch = Search( layer=citygeo, geom_type="Point", placeholder="Search for a US City", collapsed=True, search_label="nameascii", ).add_to(m) folium.LayerControl().add_to(m) colormap.add_to(m) m """ Explanation: Alright, let's build a map! End of explanation """
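# The finished map can also be written out as a standalone HTML page
# (a sketch; the file name is our own choice):
m.save("us_states_cities_search.html")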
McIntyre-Lab/ipython-demo
hdf5.ipynb
gpl-2.0
# Import packages
import numpy as np
import tables as pt # PyTables
import h5py as hp # h5py
import pandas as pd
import rpy2
%load_ext rpy2.ipython

# Create a New HDF5 File
h5file = pt.open_file('test.h5', mode='w', title='Test file')
"""
Explanation: HDF5
HDF5 stands for Hierarchical Data Format 5, and it is developed by the HDF Group. From their website:
HDF5 is a data model, library, and file format for storing and managing data. It supports an unlimited variety of datatypes, and is designed for flexible and efficient I/O and for high volume and complex data. HDF5 is portable and is extensible, allowing applications to evolve in their use of HDF5. The HDF5 Technology suite includes tools and applications for managing, manipulating, viewing, and analyzing data in the HDF5 format.
Various programming languages have developed APIs for interacting with HDF formatted files; for example, there are libraries in Python and R which I will briefly cover. There is also a set of command line tools developed by the HDF Group HERE; I will talk a little about h5ls and h5dump.
My goal here is just to give a little taste; the true power of HDF5 is not apparent until you look at real use cases. For example, the Python package vcfnp converts a VCF file into an HDF5 file, allowing you to quickly access different parts of the VCF, see here.
For all of these tools to work you need to install the HDF5 software from the HDF Group!
On Linux (Mint) you can run the following:
sudo apt-get update
sudo apt-get install h5utils hdf5-tools hdfview libhdf5-dev
On OSX take a look at MacPorts
For Linux, OSX, and Windows you can download and install from the HDF Group
HDF5 in Python
There are two major packages for interacting with HDF5 files (PyTables and h5py). Both packages have a slightly different interface, which is discussed HERE. I will go over a quick example usage of PyTables, h5py, and Pandas + PyTables.
You will need to have installed:
* Python >= 2.6 including Python 3.x (Python >= 2.7 is highly recommended)
* NumPy >= 1.7.1
* Numexpr >= 2.4
* Cython >= 0.14
* Pandas >= 0.14
PyTables
PyTables can be installed using pip:
pip install tables --user
or using your Python distribution's package manager.
End of explanation
"""

# Create new group
group = h5file.create_group('/', 'pytables', 'PyTables Test')
print(group)
"""
Explanation: HDF5 is organized in a hierarchical structure and its syntax is similar to the Linux/OSX file structure. A group can be thought of as a folder.
End of explanation
"""

# Create new table
class HgSnpCall(pt.IsDescription):
    chrom = pt.StringCol(16) # 16-character String
    start = pt.UInt32Col() # Unsigned 32-bit integer
    end = pt.UInt32Col() # Unsigned 32-bit integer
    call = pt.StringCol(16) # 16-character String

table = h5file.create_table(group, 'hg19', HgSnpCall, 'Human SNP Calls')

# Add a row of data to the table.
position = table.row

position['chrom'] = 'chr4'
position['start'] = 10023
position['end'] = 10024
position['call'] = 'A/G'
position.append()

# Flush table, similar to SQL
table.flush()

%%bash
# Let's look at the table we created using an external utility
hdfview 'test.h5'

# Close the h5file
h5file.close()
"""
Explanation: A table, in turn, can be thought of as a file in a folder.
End of explanation
"""

# Create a DataFrame
df_snp = pd.DataFrame({'chrom': [ 'chr4', 'chr4', 'chr2', 'chr2'],
                       'start': [10023, 3020, 40404, 20202],
                       'end': [10024, 3023, 40405, 20203],
                       'call': ['A/G', 'AA/G', 'T/C', 'A/C']},
                      columns=['chrom', 'start', 'end', 'call'])
print(df_snp)

# Save to hdf5 file
hdf = pd.HDFStore('test.h5')
hdf.put('pandas_test', df_snp, format='table', data_columns=True)
hdf.close()

%%bash
# Now let's look at it again
hdfview 'test.h5'
"""
Explanation: PyTables is very low level and is a little difficult to use by hand. Luckily Pandas has integrated PyTables so that you can quickly dump a Pandas DataFrame to an HDF5 file.
Pandas + PyTables
Now I am going to create a table in pandas and dump it to an HDF5 file.
End of explanation
"""

%%R
library(rhdf5)
library(bit64)

data = h5read('test.h5', 'pandas_test/table', bit64conversion='bit64')
print(data)
"""
Explanation: As I have mentioned, there are libraries for reading HDF5 files in R. Now we can open this file in R using the following:
End of explanation
"""

# Open a new hdf5 file
hdf = hp.File('test.h5', 'a')

# Create a new group
group = hdf.create_group('h5py_test')

# Create a new dataset object
dat = group.create_dataset('matrix', shape=(100, 100), dtype='i')
# I made a 100 x 100 matrix
dat[...]

# We can then do things to this matrix
dat[0,0] = 999
print(dat[...])

hdf.close()

%%bash
hdfview test.h5

%%bash
# On the command line we can also list the contents of an hdf5 file
h5ls test.h5

%%bash
# On the command line we can look at the contents of an hdf5 file
h5dump -d /h5py_test/matrix -s "0,0" -c "5,15" test.h5

%%bash
# Clean up our mess
#rm test.h5
"""
Explanation: h5py
h5py can be installed using pip:
pip install h5py --user
or using your Python distribution's package manager.
While Pandas + PyTables is very useful for traditional data sets, HDF5 can store a variety of data types. The Python package h5py is nice for higher-level access to an HDF5 file and can quickly add and store arrays and lists.
End of explanation
"""
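# To close the loop, the same file can be reopened read-only with h5py and sliced
# lazily; a minimal sketch (it assumes the cleanup cell above was left commented out,
# so test.h5 still exists):
with hp.File('test.h5', 'r') as readback:
    print(list(readback.keys()))                 # top-level groups, e.g. 'h5py_test' and 'pandas_test'
    print(readback['h5py_test/matrix'][:3, :3])  # read only a 3 x 3 corner of the 100 x 100 dataset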
tpin3694/tpin3694.github.io
regex/match_any_of_series_of_characters.ipynb
mit
# Load regex package import re """ Explanation: Title: Match Any Of A Series Of Options Slug: match_any_of_series_of_characters Summary: Match Any Of A Series Of Options Date: 2016-05-01 12:00 Category: Regex Tags: Basics Authors: Chris Albon Based on: Regular Expressions Cookbook Preliminaries End of explanation """ # Create a variable containing a text string text = 'The quick brown fox jumped over the lazy brown bear.' """ Explanation: Create some text End of explanation """ # Find any of fox, snake, or bear re.findall(r'fox|snake|bear', text) """ Explanation: Apply regex End of explanation """
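# For reference, re.findall returns the non-overlapping matches in order, so the call
# above yields ['fox', 'bear']. Wrapping the alternation in a group with word
# boundaries keeps it from matching inside longer words; a small sketch:
re.findall(r'\b(?:fox|snake|bear)\b', 'The quick brown fox ran past a bearskin rug.')
# ['fox']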
tensorflow/docs-l10n
site/ja/tutorials/load_data/pandas_dataframe.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""

import pandas as pd
import tensorflow as tf

SHUFFLE_BUFFER = 500
BATCH_SIZE = 2
"""
Explanation: Load a pandas DataFrame
<table class="tfo-notebook-buttons" align="left">
  <td>
    <a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/pandas_dataframe"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">View on TensorFlow.org</a>
  </td>
  <td>
    <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/pandas_dataframe.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a>
  </td>
  <td>
    <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/pandas_dataframe.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a>
  </td>
  <td>
    <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/load_data/pandas_dataframe.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a>
  </td>
</table>
This tutorial provides examples of how to load <a href="https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html" class="external">pandas DataFrames</a> into TensorFlow.
It uses the small <a href="https://archive.ics.uci.edu/ml/datasets/heart+Disease" class="external">heart disease dataset</a> provided by the UCI Machine Learning Repository. There are several hundred rows in the CSV. Each row describes a patient, and each column describes an attribute. You will use this information to predict whether a patient has heart disease, which is a binary classification task.
Read data using pandas
End of explanation
"""

csv_file = tf.keras.utils.get_file('heart.csv', 'https://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
"""
Explanation: Download the CSV file containing the heart disease dataset.
End of explanation
"""

df = pd.read_csv(csv_file)
"""
Explanation: Read the CSV file using pandas.
End of explanation
"""

df.head()

df.dtypes
"""
Explanation: This is what the data looks like.
End of explanation
"""

target = df.pop('target')
"""
Explanation: You will build models to predict the label contained in the target column.
End of explanation
"""

numeric_feature_names = ['age', 'thalach', 'trestbps', 'chol', 'oldpeak']
numeric_features = df[numeric_feature_names]
numeric_features.head()
"""
Explanation: A DataFrame as an array
If your data has a uniform datatype, or dtype, you can use a pandas DataFrame anywhere you could use a NumPy array. This works because the pandas.DataFrame class supports the __array__ protocol, and TensorFlow's tf.convert_to_tensor function accepts objects that support the protocol.
Take the numeric features from the dataset (skip the categorical features for now).
End of explanation
"""

tf.convert_to_tensor(numeric_features)
"""
Explanation: The DataFrame can be converted to a NumPy array using the DataFrame.values property or numpy.array(df). To convert it to a tensor, use tf.convert_to_tensor.
End of explanation
"""

normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(numeric_features)
"""
Explanation: In general, if an object can be converted to a tensor with tf.convert_to_tensor, it can be passed anywhere you can pass a tf.Tensor.
The Model.fit method
A DataFrame, interpreted as a single tensor, can be used directly as an argument to the Model.fit
method.
Below is an example of training a model on the numeric features of the dataset.
The first step is to normalize the input ranges. Use a tf.keras.layers.Normalization layer for that.
To set the layer's mean and standard deviation before running it, be sure to call the Normalization.adapt method.
End of explanation
"""

normalizer(numeric_features.iloc[:3])
"""
Explanation: Call the layer on the first three rows of the DataFrame to visualize a sample of the output from this layer.
End of explanation
"""

def get_basic_model():
  model = tf.keras.Sequential([
    normalizer,
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(1)
  ])

  model.compile(optimizer='adam',
                loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                metrics=['accuracy'])
  return model
"""
Explanation: Use the normalization layer as the first layer of a simple model.
End of explanation
"""

model = get_basic_model()
model.fit(numeric_features, target, epochs=15, batch_size=BATCH_SIZE)
"""
Explanation: When you pass the DataFrame as the x argument to Model.fit, Keras treats the DataFrame as it would a NumPy array.
End of explanation
"""

numeric_dataset = tf.data.Dataset.from_tensor_slices((numeric_features, target))

for row in numeric_dataset.take(3):
  print(row)

numeric_batches = numeric_dataset.shuffle(1000).batch(BATCH_SIZE)

model = get_basic_model()
model.fit(numeric_batches, epochs=15)
"""
Explanation: With tf.data
If you want to apply tf.data transformations to a DataFrame of a uniform dtype, the Dataset.from_tensor_slices method creates a dataset that iterates over the rows of the DataFrame. Each row is initially a vector of values. To train a model, you need (inputs, labels) pairs, so pass (features, labels) to Dataset.from_tensor_slices and you will get the required pairs of slices.
End of explanation
"""

numeric_dict_ds = tf.data.Dataset.from_tensor_slices((dict(numeric_features), target))
"""
Explanation: A DataFrame as a dictionary
When you start dealing with heterogeneous data, it is no longer possible to treat the DataFrame as if it were a single array. TensorFlow tensors require that all elements have the same dtype.
So, in this case, you need to start treating it as a dictionary of columns, where each column has a uniform dtype. A DataFrame is a lot like a dictionary of arrays, so typically all you need to do is cast the DataFrame to a Python dict. Many important TensorFlow APIs support (nested) dictionaries of arrays as inputs.
tf.data input pipelines handle this quite well. All tf.data operations handle dictionaries and tuples automatically, so to make a dataset of dictionary-examples from a DataFrame, just cast it to a dict before slicing it with Dataset.from_tensor_slices.
End of explanation
"""

for row in numeric_dict_ds.take(3):
  print(row)
"""
Explanation: Here are the first three examples from that dataset.
End of explanation
"""

def stack_dict(inputs, fun=tf.stack):
    values = []
    for key in sorted(inputs.keys()):
      values.append(tf.cast(inputs[key], tf.float32))

    return fun(values, axis=-1)

#@title
class MyModel(tf.keras.Model):
  def __init__(self):
    # Create all the internal layers in init.
    super().__init__(self)

    self.normalizer = tf.keras.layers.Normalization(axis=-1)

    self.seq = tf.keras.Sequential([
      self.normalizer,
      tf.keras.layers.Dense(10, activation='relu'),
      tf.keras.layers.Dense(10, activation='relu'),
      tf.keras.layers.Dense(1)
    ])

  def adapt(self, inputs):
    # Stack the inputs and `adapt` the normalization layer.
    inputs = stack_dict(inputs)
    self.normalizer.adapt(inputs)

  def call(self, inputs):
    # Stack the inputs
    inputs = stack_dict(inputs)

    # Run them through all the layers.
    result = self.seq(inputs)

    return result

model = MyModel()

model.adapt(dict(numeric_features))

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'],
              run_eagerly=True)
"""
Explanation: Dictionaries with Keras
Typically, Keras models and layers expect a single input tensor, but these classes can accept and return nested structures of dictionaries, tuples and tensors. These structures are known as "nests" (refer to the tf.nest module for details).
There are two equivalent ways you can write a Keras model that accepts a dictionary as input.
1.
The Model-subclass style
Write a subclass of tf.keras.Model (or tf.keras.Layer). Handle the inputs directly, and create the outputs.
End of explanation
"""

model.fit(dict(numeric_features), target, epochs=5, batch_size=BATCH_SIZE)

numeric_dict_batches = numeric_dict_ds.shuffle(SHUFFLE_BUFFER).batch(BATCH_SIZE)
model.fit(numeric_dict_batches, epochs=5)
"""
Explanation: This model can accept either a dictionary of columns or a dataset of dictionary-elements for training.
End of explanation
"""

model.predict(dict(numeric_features.iloc[:3]))
"""
Explanation: Here are the predictions for the first three examples.
End of explanation
"""

inputs = {}
for name, column in numeric_features.items():
  inputs[name] = tf.keras.Input(
      shape=(1,), name=name, dtype=tf.float32)

inputs

x = stack_dict(inputs, fun=tf.concat)

normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(stack_dict(dict(numeric_features)))

x = normalizer(x)
x = tf.keras.layers.Dense(10, activation='relu')(x)
x = tf.keras.layers.Dense(10, activation='relu')(x)
x = tf.keras.layers.Dense(1)(x)

model = tf.keras.Model(inputs, x)

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'],
              run_eagerly=True)

tf.keras.utils.plot_model(model, rankdir="LR", show_shapes=True)
"""
Explanation: 2. The Keras functional style
End of explanation
"""

model.fit(dict(numeric_features), target, epochs=5, batch_size=BATCH_SIZE)

numeric_dict_batches = numeric_dict_ds.shuffle(SHUFFLE_BUFFER).batch(BATCH_SIZE)
model.fit(numeric_dict_batches, epochs=5)
"""
Explanation: You can train the functional model the same way as the model subclass.
End of explanation
"""

binary_feature_names = ['sex', 'fbs', 'exang']

categorical_feature_names = ['cp', 'restecg', 'slope', 'thal', 'ca']
"""
Explanation: Full example
If you are passing a heterogeneous <code>DataFrame</code> to Keras, each column may need unique preprocessing. You could do this preprocessing directly in the DataFrame, but for a model to work correctly, inputs always need to be preprocessed the same way. So the best approach is to build the preprocessing into the model. <a>Keras preprocessing layers</a> cover many common tasks.
Build the preprocessing head
In this dataset, some of the "integer" features in the raw data are actually categorical indices. These indices are not really ordered numeric values (refer to the <a href="https://archive.ics.uci.edu/ml/datasets/heart+Disease" class="external">dataset description</a> for details). Because these are unordered, they are inappropriate to feed directly to the model; the model would interpret them as being ordered. To use these inputs, you will need to encode them, either as one-hot vectors or embedding vectors. The same applies to string-categorical features.
Note: If you have many features that need identical preprocessing, it is more efficient to concatenate them together before applying the preprocessing.
Binary features, on the other hand, do not generally need to be encoded or normalized.
Start by creating a list of the features that fall into each group.
End of explanation
"""

inputs = {}
for name, column in df.items():
  if type(column[0]) == str:
    dtype = tf.string
  elif (name in categorical_feature_names or
        name in binary_feature_names):
    dtype = tf.int64
  else:
    dtype = tf.float32

  inputs[name] = tf.keras.Input(shape=(), name=name, dtype=dtype)

inputs
"""
Explanation: The next step is to build a preprocessing model that will apply appropriate preprocessing to each input and concatenate the results.
This section uses the Keras Functional API to implement the preprocessing. You start by creating one tf.keras.Input for each column of the dataframe.
End of explanation
"""

preprocessed = []

for name in binary_feature_names:
  inp = inputs[name]
  inp = inp[:, tf.newaxis]
  float_value = tf.cast(inp, tf.float32)
  preprocessed.append(float_value)

preprocessed
"""
Explanation: For each input you'll apply some transformations using Keras layers and TensorFlow ops. Each feature starts as a batch of scalars (shape=(batch,)). The output for each should be a batch of tf.float32 vectors (shape=(batch, n)). The last step will concatenate all those vectors together.
Binary inputs
Since the binary inputs don't need any preprocessing, just add the vector axis, cast them to float32 and add them to the list of preprocessed inputs.
End of explanation
"""

normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(stack_dict(dict(numeric_features)))
"""
Explanation: Numeric inputs
As in the earlier section, these numeric inputs should be run through a tf.keras.layers.Normalization layer before use. The difference is that this time they are input as a dict. The code below collects the numeric features from the
DataFrame, stacks them together and passes those to the Normalization.adapt method.
End of explanation
"""

numeric_inputs = {}
for name in numeric_feature_names:
  numeric_inputs[name]=inputs[name]

numeric_inputs = stack_dict(numeric_inputs)
numeric_normalized = normalizer(numeric_inputs)

preprocessed.append(numeric_normalized)

preprocessed
"""
Explanation: The code here stacks the numeric features and runs them through the normalization layer.
End of explanation
"""

vocab = ['a','b','c']
lookup = tf.keras.layers.StringLookup(vocabulary=vocab, output_mode='one_hot')
lookup(['c','a','a','b','zzz'])

vocab = [1,4,7,99]
lookup = tf.keras.layers.IntegerLookup(vocabulary=vocab, output_mode='one_hot')

lookup([-1,4,1])
"""
Explanation: Categorical features
To use categorical features you will first need to encode them into either binary vectors or embeddings. Since these features only contain a small number of categories, convert the inputs directly to one-hot vectors using the output_mode='one_hot' option, supported by both the tf.keras.layers.StringLookup and tf.keras.layers.IntegerLookup layers.
Here is an example of how these layers work.
End of explanation
"""

for name in categorical_feature_names:
  vocab = sorted(set(df[name]))
  print(f'name: {name}')
  print(f'vocab: {vocab}\n')

  if type(vocab[0]) is str:
    lookup = tf.keras.layers.StringLookup(vocabulary=vocab, output_mode='one_hot')
  else:
    lookup = tf.keras.layers.IntegerLookup(vocabulary=vocab, output_mode='one_hot')

  x = inputs[name][:, tf.newaxis]
  x = lookup(x)
  preprocessed.append(x)
"""
Explanation: To determine the vocabulary for each input, create a layer to convert that vocabulary to a one-hot vector.
End of explanation
"""

preprocessed
"""
Explanation: Assemble the preprocessing head
At this point preprocessed is just a Python list of all the preprocessing results, each with a shape of (batch_size, depth).
End of explanation
"""

preprocesssed_result = tf.concat(preprocessed, axis=-1)
preprocesssed_result
"""
Explanation: Concatenate all the preprocessed features along the depth axis, so each dictionary-example is converted into a single vector. The vector contains the categorical features, numeric features, and categorical one-hot features.
End of explanation
"""

preprocessor = tf.keras.Model(inputs, preprocesssed_result)

tf.keras.utils.plot_model(preprocessor, rankdir="LR", show_shapes=True)
"""
Explanation: Now create a model out of that calculation so it can be reused.
End of explanation
"""

preprocessor(dict(df.iloc[:1]))
"""
Explanation: To test the preprocessor, use the <a href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.iloc.html" class="external">DataFrame.iloc</a> accessor to slice the first example from the DataFrame. Then convert it to a dictionary and pass the dictionary to the preprocessor. The result is a single vector containing the binary features, normalized numeric features, and the one-hot categorical features, in that order.
End of explanation
"""

body = tf.keras.Sequential([
  tf.keras.layers.Dense(10, activation='relu'),
  tf.keras.layers.Dense(10, activation='relu'),
  tf.keras.layers.Dense(1)
])
"""
Explanation: Create and train a model
Now build the main body of the model. Use the same configuration as in the previous example: a couple of Dense rectified-linear layers and a Dense(1) output layer for the classification.
End of explanation
"""

inputs

x = preprocessor(inputs)
x

result = body(x)
result

model = tf.keras.Model(inputs, result)

model.compile(optimizer='adam',
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
"""
Explanation: Now put the two pieces together using the Keras functional API.
End of explanation
"""

history = model.fit(dict(df), target, epochs=5, batch_size=BATCH_SIZE)
"""
Explanation: This model expects a dictionary of inputs. The simplest way to pass it the data is to convert the DataFrame to a dict and pass that dict as the x argument to Model.fit.
End of explanation
"""

ds = tf.data.Dataset.from_tensor_slices((
    dict(df),
    target
))

ds = ds.batch(BATCH_SIZE)

import pprint

for x, y in ds.take(1):
  pprint.pprint(x)
  print()
  print(y)

history = model.fit(ds, epochs=5)
"""
Explanation: Using tf.data works as well.
End of explanation
"""
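# As a final check of the combined preprocessing-plus-classifier model, it also accepts
# a plain dictionary of columns at inference time; a minimal sketch using the first
# three rows of the original DataFrame (the outputs are raw logits, since the loss was
# built with from_logits=True):
print(model.predict(dict(df.iloc[:3])))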
samuxiii/notebooks
houses/House Prices.ipynb
apache-2.0
import numpy as np import pandas as pd #load the files train = pd.read_csv('input/train.csv') test = pd.read_csv('input/test.csv') data = pd.concat([train, test]) #size of training dataset train_samples = train.shape[0] #print some of them data.head() # remove the Id feature data.drop(['Id'],1, inplace=True); data.info() """ Explanation: House Prices Estimator Note: It's a competition from Kaggle.com and the input data was retrieved from there. Details Goal It is your job to predict the sales price for each house. For each Id in the test set, you must predict the value of the SalePrice variable. Metric Submissions are evaluated on Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed sales price. (Taking logs means that errors in predicting expensive houses and cheap houses will affect the result equally.) Submission File Format The file should contain a header and have the following format: Id,SalePrice 1461,169000.1 1462,187724.1233 1463,175221 etc. TODO Use another algorithm to predict the house price More feature engineering Add more comments, thoughts, conclusions, ... Come up with new ideas.. Data Analysis End of explanation """ print("Size training: {}".format(train.shape[0])) print("Size testing: {}".format(test.shape[0])) """ Explanation: First problem The training and test datasets have almost the same size. End of explanation """ datanum = data.select_dtypes([np.number]) datanum.describe() data.select_dtypes(exclude=[np.number]).head() """ Explanation: Selecting only numeric columns (by now) End of explanation """ datanum.columns[datanum.isnull().any()].tolist() #number of row without NaN print(datanum.shape[0] - datanum.dropna().shape[0]) #list of columns with NaN datanum.columns[datanum.isnull().any()].tolist() #Filling with the mean datanum_no_nan = datanum.fillna(datanum.mean()) #check datanum_no_nan.columns[datanum_no_nan.isnull().any()].tolist() """ Explanation: Find if there's null values End of explanation """ import matplotlib.pyplot as plt datanum_no_nan.drop(['SalePrice'], axis=1).head(15).plot() plt.show() #Squeeze the data to [0,1] from sklearn import preprocessing scaler = preprocessing.MinMaxScaler() columns = datanum_no_nan.columns columns = columns.drop('SalePrice') print("Features: {}".format(columns)) data_norm = datanum_no_nan data_norm[columns] = scaler.fit_transform(datanum_no_nan[columns]) print("Train shape: {}".format(data_norm.shape)) data_norm.drop(['SalePrice'], axis=1).head(15).plot() plt.show() data_norm.describe().T #plotting distributions of numeric features data_norm.hist(bins=50, figsize=(22,16)) plt.show() """ Explanation: Normalizing End of explanation """ data_norm['1stFlrSF'].hist() plt.show() #transform the data so it's closest to normal from scipy import stats data_gauss = data_norm.copy() for f in datanum.columns.tolist(): data_gauss[f], _ = stats.boxcox(data_gauss[f]+0.01) #rescale again std_scaler = preprocessing.StandardScaler() data_gauss[columns] = std_scaler.fit_transform(data_gauss[columns]) data_gauss['1stFlrSF'].hist() plt.show() #plotting distributions of numeric features data_gauss.hist(bins=50, figsize=(22,16)) plt.show() """ Explanation: Using Box-Cox End of explanation """ #include no numbers columns data.select_dtypes(exclude=[np.number]).head() data_categorical = pd.get_dummies(data.select_dtypes(exclude=[np.number])) data_all = pd.concat([data_norm, data_categorical], axis=1) """ Explanation: Splitting dataset in train and test (getting batches) End of explanation 
""" #data_norm.columns.tolist() feat_list = ['1stFlrSF', #'2ndFlrSF', #'3SsnPorch', 'BedroomAbvGr', 'BsmtFinSF1', #'BsmtFinSF2', #'BsmtFullBath', #'BsmtHalfBath', 'BsmtUnfSF', #'EnclosedPorch', #'Fireplaces', #'FullBath', 'GarageArea', 'GarageCars', 'GarageYrBlt', #'GrLivArea', #'HalfBath', #'KitchenAbvGr', 'LotArea', 'LotFrontage', #'LowQualFinSF', 'MSSubClass', 'MasVnrArea', #'MiscVal', 'MoSold', 'OpenPorchSF', 'OverallCond', 'OverallQual', 'PoolArea', #'SalePrice', #'ScreenPorch', 'TotRmsAbvGrd', 'TotalBsmtSF', 'WoodDeckSF', 'YearBuilt', 'YearRemodAdd'] #'YrSold'] %matplotlib inline import seaborn as sns fig = plt.figure(figsize=(14, 10)) sns.heatmap(data_norm[feat_list+['SalePrice']].corr()) #heatmap fig = plt.figure(figsize=(14, 10)) sns.heatmap(data_norm.corr()) # Correlation features data_norm.corr()['SalePrice'].sort_values().tail(13) feat_low_corr = ['KitchenAbvGr', 'EnclosedPorch', 'MSSubClass', 'OverallCond', 'YrSold', 'LowQualFinSF', 'MiscVal', 'BsmtHalfBath', 'BsmtFinSF2', 'MoSold', '3SsnPorch', 'PoolArea', 'ScreenPorch'] feat_high_corr = ['Fireplaces', 'MasVnrArea', 'YearRemodAdd', 'YearBuilt', 'TotRmsAbvGrd', 'FullBath', '1stFlrSF', 'TotalBsmtSF', 'GarageArea', 'GarageCars', 'GrLivArea', 'OverallQual'] data_norm_low_corr = data_norm[feat_low_corr] data_norm_high_corr = data_norm[feat_high_corr] """ Explanation: Selecting good features... End of explanation """ from sklearn.model_selection import KFold y = np.array(data_all['SalePrice']) X = np.array(data_norm_high_corr) #split by idx idx = train_samples X_train, X_test = X[:idx], X[idx:] y_train, y_test = y[:idx], y[idx:] print("Shape X train: {}".format(X_train.shape)) print("Shape y train: {}".format(y_train.shape)) print("Shape X test: {}".format(X_test.shape)) print("Shape y test: {}".format(y_test.shape)) kf = KFold(n_splits=3, random_state=9, shuffle=True) print(kf) """ Explanation: KFold End of explanation """ #plotting PCA from sklearn.decomposition import PCA def plotPCA(X, y): pca = PCA(n_components=1) X_r = pca.fit(X).transform(X) plt.plot(X_r, y, 'x') from sklearn.covariance import EllipticEnvelope # fit the model ee = EllipticEnvelope(contamination=0.05, assume_centered=True, random_state=9) ee.fit(X_train) pred = ee.predict(X_train) X_train = X_train[pred == 1] y_train = y_train[pred == 1] print(X_train.shape) print(y_train.shape) #after removing anomalies plotPCA(X_train, y_train) """ Explanation: Anomaly Detection End of explanation """ from sklearn.neural_network import MLPRegressor from sklearn.metrics import mean_squared_error rf = MLPRegressor(activation='relu', solver='lbfgs', #learning_rate_init=1e-2, #learning_rate='adaptive', #alpha=0.0001, max_iter=400, #shuffle=True, hidden_layer_sizes=(64,64), warm_start=True, random_state=9, verbose=False) for e in range(1): batch = 1; for train_idx, val_idx in kf.split(X_train, y_train): X_t, X_v = X_train[train_idx], X_train[val_idx] y_t, y_v = y_train[train_idx], y_train[val_idx] #training rf.fit(X_t, y_t) #calculate costs t_error = mean_squared_error(y_t, rf.predict(X_t))**0.5 v_error = mean_squared_error(y_v, rf.predict(X_v))**0.5 print("{}-{}) Training error: {:.2f} Validation error: {:.2f}".format(e, batch, t_error, v_error)) batch += 1 #Scores print("Training score: {:.4f}".format(rf.score(X_train, y_train))) # Gradient boosting from sklearn import ensemble params = {'n_estimators': 100, 'max_depth': 50, 'min_samples_split': 5, 'learning_rate': 0.1, 'loss': 'ls', 'random_state':9, 'warm_start':True} gbr = ensemble.GradientBoostingRegressor(**params) batch 
= 0 for train_idx, val_idx in kf.split(X_train, y_train): X_t, X_v = X_train[train_idx], X_train[val_idx] y_t, y_v = y_train[train_idx], y_train[val_idx] #training gbr.fit(X_t, y_t) #calculate costs t_error = mean_squared_error(y_t, gbr.predict(X_t))**0.5 v_error = mean_squared_error(y_v, gbr.predict(X_v))**0.5 print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error)) batch += 1 #Scores print("Training score: {:.4f}".format(gbr.score(X_train, y_train))) # AdaBoost from sklearn.ensemble import AdaBoostRegressor from sklearn.tree import DecisionTreeRegressor abr = AdaBoostRegressor(DecisionTreeRegressor(max_depth=50), n_estimators=100, random_state=9) batch = 0 for train_idx, val_idx in kf.split(X_train, y_train): X_t, X_v = X_train[train_idx], X_train[val_idx] y_t, y_v = y_train[train_idx], y_train[val_idx] #training abr.fit(X_t, y_t) #calculate costs t_error = mean_squared_error(y_t, abr.predict(X_t))**0.5 v_error = mean_squared_error(y_v, abr.predict(X_v))**0.5 print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error)) batch += 1 #Scores print("Training score: {:.4f}".format(abr.score(X_train, y_train))) # Lasso from sklearn.linear_model import Lasso lr = Lasso() batch = 0 for train_idx, val_idx in kf.split(X_train, y_train): X_t, X_v = X_train[train_idx], X_train[val_idx] y_t, y_v = y_train[train_idx], y_train[val_idx] #training lr.fit(X_t, y_t) #calculate costs t_error = mean_squared_error(y_t, lr.predict(X_t))**0.5 v_error = mean_squared_error(y_v, lr.predict(X_v))**0.5 print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error)) batch += 1 #Scores print("Training score: {:.4f}".format(lr.score(X_train, y_train))) """ Explanation: Models Multilayer Perceptron End of explanation """ ### Testing ### Ada + mlp + gradient boosting -> level 1 predictions ### level 1 -> mlp -> level 2 predictions (final) # Training #mlp1 = MLPRegressor(activation='logistic', # solver='sgd', # hidden_layer_sizes=(5,5), # learning_rate='adaptive', # random_state=9, # warm_start=True, # verbose=False) from sklearn.linear_model import LogisticRegression mlp = LogisticRegression(random_state=9) sclr = preprocessing.StandardScaler() def stack_training(X, y): X0 = rf.predict(X) X1 = gbr.predict(X) X2 = abr.predict(X) X3 = lr.predict(X) Xt = np.array([X0, X1, X2, X3]).T #Xt = np.array([X0, X1, X2, X3, X1+X3, X2*X3, X0*X2*X3, X0/X2, X1/X3, X0/X3, (X0+X1+X2+X3)/4]).T Xt = sclr.fit_transform(Xt) mlp.fit(Xt, y) def stack_predict(X, verbose=False): X0 = rf.predict(X) X1 = gbr.predict(X) X2 = abr.predict(X) X3 = lr.predict(X) Xt = np.array([X0, X1, X2, X3]).T #Xt = np.array([X0, X1, X2, X3, X1+X3, X2*X3, X0*X2*X3, X0/X2, X1/X3, X0/X3, (X0+X1+X2+X3)/4]).T Xt = sclr.transform(Xt) if verbose: print("Training score: {:.4f}".format(mlp.score(Xt, y_train))) plotPCA(Xt, y_train) return mlp.predict(Xt) # batch = 0 kf = KFold(n_splits=10, random_state=9, shuffle=True) for train_idx, val_idx in kf.split(X_train, y_train): X_t, X_v = X_train[train_idx], X_train[val_idx] y_t, y_v = y_train[train_idx], y_train[val_idx] #training stack_training(X_t, y_t) #calculate costs t_error = mean_squared_error(y_t, abr.predict(X_t))**0.5 v_error = mean_squared_error(y_v, abr.predict(X_v))**0.5 print("{}) Training error: {:.2f} Validation error: {:.2f}".format(batch, t_error, v_error)) batch += 1 rmse = mean_squared_error(y_train, stack_predict(X_train, True))**0.5 print("RMSE: {:.4f}".format(rmse)) """ Explanation: Stacked model End of explanation 
""" from sklearn.metrics import mean_squared_error import random RMSE_rf = mean_squared_error(y_train, rf.predict(X_train))**0.5 RMSE_gbr = mean_squared_error(y_train, gbr.predict(X_train))**0.5 RMSE_abr = mean_squared_error(y_train, abr.predict(X_train))**0.5 RMSE_lr = mean_squared_error(y_train, lr.predict(X_train))**0.5 RMSE_stack = mean_squared_error(y_train, stack_predict(X_train))**0.5 def avg_predict(X): return (rf.predict(X) + gbr.predict(X) + abr.predict(X) + lr.predict(X))/4 predictions = avg_predict(X_train) RMSE_total = mean_squared_error(y_train, predictions)**0.5 print("RMSE mlp: {:.3f}".format(RMSE_rf)) print("RMSE gbr: {:.3f}".format(RMSE_gbr)) print("RMSE abr: {:.3f}".format(RMSE_abr)) print("RMSE lr: {:.3f}".format(RMSE_lr)) print("====") print("RMSE average: {:.3f}".format(RMSE_total)) print("RMSE stacked: {:.3f}".format(RMSE_stack)) """ Explanation: Evaluation It has to be used the root mean squared error, RMSE. End of explanation """ import os #predict = avg_predict(X_test) predict = stack_predict(X_test) file = "Id,SalePrice" + os.linesep startId = 1461 for i in range(len(X_test)): file += "{},{}".format(startId, (int)(predict[i])) + os.linesep startId += 1 #print(file) # Save to file with open('attempt.txt', 'w') as f: f.write(file) """ Explanation: Get Predictions Good results without data_gauss End of explanation """
abhinavsingh/proxy.py
tutorial/http_parser.ipynb
bsd-3-clause
from proxy.http.methods import httpMethods from proxy.http.parser import HttpParser, httpParserTypes, httpParserStates from proxy.common.constants import HTTP_1_1 get_request = HttpParser(httpParserTypes.REQUEST_PARSER) get_request.parse(memoryview(b'GET / HTTP/1.1\r\nHost: jaxl.com\r\n\r\n')) print(get_request.build()) assert get_request.is_complete assert get_request.method == httpMethods.GET assert get_request.version == HTTP_1_1 assert get_request.host == None assert get_request.port == 80 assert get_request._url != None assert get_request._url.remainder == b'/' assert get_request.has_header(b'host') assert get_request.header(b'host') == b'jaxl.com' assert len(get_request.headers) == 1 """ Explanation: HttpParser HttpParser class is at the heart of everything related to HTTP. It is used by Web server and Proxy server core and their plugin eco-system. As the name suggests, it is capable of parsing both HTTP request and response packets. It can also parse HTTP look-a-like protocols like ICAP, SIP etc. Most importantly, remember that HttpParser was originally written to handle HTTP packets arriving in the context of a proxy server and till date its default behavior favors the same flavor. Let's start by parsing a HTTP web request using HttpParser End of explanation """ proxy_request = HttpParser(httpParserTypes.REQUEST_PARSER) proxy_request.parse(memoryview(b'GET http://jaxl.com/ HTTP/1.1\r\nHost: jaxl.com\r\n\r\n')) print(proxy_request.build()) print(proxy_request.build(for_proxy=True)) assert proxy_request.is_complete assert proxy_request.method == httpMethods.GET assert proxy_request.version == HTTP_1_1 assert proxy_request.host == b'jaxl.com' assert proxy_request.port == 80 assert proxy_request._url != None assert proxy_request._url.remainder == b'/' assert proxy_request.has_header(b'host') assert proxy_request.header(b'host') == b'jaxl.com' assert len(proxy_request.headers) == 1 """ Explanation: Next, let's parse a HTTP proxy request using HttpParser End of explanation """ connect_request = HttpParser(httpParserTypes.REQUEST_PARSER) connect_request.parse(memoryview(b'CONNECT jaxl.com:443 HTTP/1.1\r\nHost: jaxl.com:443\r\n\r\n')) print(connect_request.build()) print(connect_request.build(for_proxy=True)) assert connect_request.is_complete assert connect_request.is_https_tunnel assert connect_request.version == HTTP_1_1 assert connect_request.host == b'jaxl.com' assert connect_request.port == 443 assert connect_request._url != None assert connect_request._url.remainder == None assert connect_request.has_header(b'host') assert connect_request.header(b'host') == b'jaxl.com:443' assert len(connect_request.headers) == 1 """ Explanation: Notice how proxy_request.build() and proxy_request.build(for_proxy=True) behave. Also, notice how proxy_request.host field is populated for a HTTP proxy packet but not for the prior HTTP web request packet example. To conclude, let's parse a HTTPS proxy request End of explanation """
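# httpParserStates is imported above but never exercised; HttpParser is an incremental
# parser, so a request can be fed to parse() in pieces as bytes arrive. A small sketch
# (assuming the state attribute and the COMPLETE member behave as their names suggest):
chunked_request = HttpParser(httpParserTypes.REQUEST_PARSER)
chunked_request.parse(memoryview(b'GET / HTTP/1.1\r\nHost: '))
assert not chunked_request.is_complete
chunked_request.parse(memoryview(b'jaxl.com\r\n\r\n'))
assert chunked_request.state == httpParserStates.COMPLETE
assert chunked_request.is_complete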
jpn--/larch
book/example/017_mnl_final.ipynb
gpl-3.0
# TEST import larch.numba as lx import larch import pandas as pd pd.set_option("display.max_columns", 999) pd.set_option('expand_frame_repr', False) pd.set_option('display.precision', 3) larch._doctest_mode_ = True """ Explanation: 17: MTC Expanded MNL Mode Choice End of explanation """ import larch.numba as lx d = lx.examples.MTC(format='dataset') m = lx.Model(d) """ Explanation: For this example, we're going to re-create model 17 from the Self Instructing Manual. (pp. 128) End of explanation """ m.availability_var = 'avail' m.choice_ca_var = 'chose' from larch.roles import P, X m.utility_ca = ( + X("totcost/hhinc") * P("costbyincome") + X("tottime * (altnum <= 4)") * P("motorized_time") + X("tottime * (altnum >= 5)") * P("nonmotorized_time") + X("ovtt/dist * (altnum <= 4)") * P("motorized_ovtbydist") ) """ Explanation: We will use the usual choice and availability variables. End of explanation """ for a in [4,5,6]: m.utility_co[a] += X("hhinc") * P("hhinc#{}".format(a)) """ Explanation: The "totcost/hhinc" data is computed once as a new variable when loading the model data. The same applies for tottime filtered by motorized modes (we harness the convenient fact that all the motorized modes have identifying numbers 4 or less), and "ovtt/dist". End of explanation """ for i in d['alt_names'][1:3]: name = str(i.values) a = int(i.altid) m.utility_co[a] += ( + X("vehbywrk") * P("vehbywrk_SR") + X("wkccbd+wknccbd") * P("wkcbd_"+name) + X("wkempden") * P("wkempden_"+name) + P("ASC_"+name) ) for i in d['alt_names'][3:]: name = str(i.values) a = int(i.altid) m.utility_co[a] += ( + X("vehbywrk") * P("vehbywrk_"+name) + X("wkccbd+wknccbd") * P("wkcbd_"+name) + X("wkempden") * P("wkempden_"+name) + P("ASC_"+name) ) """ Explanation: Since the model we want to create groups together DA, SR2 and SR3+ jointly as reference alternatives with respect to income, we can simply omit all of these alternatives from the block that applies to hhinc. For vehicles per worker, the preferred model include a joint parameter on SR2 and SR3+, but not including DA and not fixed at zero. Here we might use a shadow_parameter (also called an alias in some places), which allows us to specify one or more parameters that are simply a fixed proportion of another parameter. For example, we can say that vehbywrk_SR2 will be equal to vehbywrk_SR. End of explanation """ m.ordering = ( ('LOS', ".*cost.*", ".*time.*", ".*dist.*",), ('Zonal', "wkcbd.*", "wkempden.*",), ('Household', "hhinc.*", "vehbywrk.*",), ('ASCs', "ASC.*",), ) """ Explanation: We didn't explicitly define our parameters first, which is fine; Larch will find them in the utility functions (or elsewhere in more complex models). But they may be found in a weird order that is hard to read in reports. 
We can define an ordering scheme by assigning to the parameter_groups attribute, like this: End of explanation """ m.maximize_loglike() # TEST r = _ from pytest import approx assert r.loglike == approx(-3444.185105027836) assert r.n_cases == 5029 assert 'success' in r.message.lower() assert r.x.to_dict() == approx({ 'ASC_Bike': -1.6288174781480145, 'ASC_SR2': -1.8077821796310174, 'ASC_SR3+': -3.4336998987834213, 'ASC_Transit': -0.6850205869302504, 'ASC_Walk': 0.06826615821030824, 'costbyincome': -0.05239236004239274, 'hhinc#4': -0.0053231144110710265, 'hhinc#5': -0.008643179890815506, 'hhinc#6': -0.005997795266774085, 'motorized_ovtbydist': -0.1328389672470942, 'motorized_time': -0.02018676908268187, 'nonmotorized_time': -0.04544467417768392, 'vehbywrk_Bike': -0.7021221804213855, 'vehbywrk_SR': -0.31664078667048384, 'vehbywrk_Transit': -0.9462364952409247, 'vehbywrk_Walk': -0.7218049107571212, 'wkcbd_Bike': 0.48936706067828845, 'wkcbd_SR2': 0.25986035009653136, 'wkcbd_SR3+': 1.069304378606234, 'wkcbd_Transit': 1.308896887615559, 'wkcbd_Walk': 0.10177663194876692, 'wkempden_Bike': 0.0019282498545339284, 'wkempden_SR2': 0.0015778182187284415, 'wkempden_SR3+': 0.002257039208670294, 'wkempden_Transit': 0.003132740135033535, 'wkempden_Walk': 0.0028906014986955593, }) m.calculate_parameter_covariance() m.parameter_summary() # TEST assert m.pf.t_stat.to_dict() == approx({ 'ASC_Bike': -3.8110051632761968, 'ASC_SR2': -17.03471916394958, 'ASC_SR3+': -22.610264384635116, 'ASC_Transit': -2.764269785206984, 'ASC_Walk': 0.19617043561070976, 'costbyincome': -5.0360570040949515, 'hhinc#4': -2.6923847354101915, 'hhinc#5': -1.676857732750138, 'hhinc#6': -1.9049215648409885, 'motorized_ovtbydist': -6.763234843764025, 'motorized_time': -5.291965825624687, 'nonmotorized_time': -7.878190061966541, 'vehbywrk_Bike': -2.7183965402594508, 'vehbywrk_SR': -4.751992210976383, 'vehbywrk_Transit': -7.999145737275119, 'vehbywrk_Walk': -4.261234830020787, 'wkcbd_Bike': 1.3552321494507682, 'wkcbd_SR2': 2.1066605695091867, 'wkcbd_SR3+': 5.590372196382326, 'wkcbd_Transit': 7.899400934474615, 'wkcbd_Walk': 0.40370690248331875, 'wkempden_Bike': 1.5864614051558108, 'wkempden_SR2': 4.042074989321517, 'wkempden_SR3+': 4.993778175062689, 'wkempden_Transit': 8.684498489531592, 'wkempden_Walk': 3.8952326996888065, }) assert m.pf.robust_t_stat.to_dict() == approx({ 'ASC_Bike': -3.350788895379893, 'ASC_SR2': -15.450849978191432, 'ASC_SR3+': -22.047875467016553, 'ASC_Transit': -2.546641253284614, 'ASC_Walk': 0.19546387137430002, 'costbyincome': -3.927312777634008, 'hhinc#4': -2.6000468880002883, 'hhinc#5': -1.448502844590286, 'hhinc#6': -1.7478834622063846, 'motorized_ovtbydist': -5.512721233692836, 'motorized_time': -5.1781560789822985, 'nonmotorized_time': -7.890366874224642, 'vehbywrk_Bike': -2.26956809717166, 'vehbywrk_SR': -4.1884543094363345, 'vehbywrk_Transit': -6.907359588761182, 'vehbywrk_Walk': -3.552049105845569, 'wkcbd_Bike': 1.3353508709464412, 'wkcbd_SR2': 2.1061572488997933, 'wkcbd_SR3+': 5.629597757231176, 'wkcbd_Transit': 8.258699769521979, 'wkcbd_Walk': 0.3932045643537346, 'wkempden_Bike': 1.640126229774069, 'wkempden_SR2': 3.8222350454916496, 'wkempden_SR3+': 4.974652568010134, 'wkempden_Transit': 8.178299823852544, 'wkempden_Walk': 4.06724937563278, }) # TEST # model also works for IDCE df = pd.read_csv(lx.example_file("MTCwork.csv.gz"), index_col=['casenum','altnum']) df.index = df.index.rename('altid', level=1) df['altnum'] = df.index.get_level_values(1) m.datatree = lx.Dataset.construct.from_idce(df) 
m.availability_var = '1'
assert m.loglike() == approx(-3444.185105027836)
assert m.n_cases == 5029
assert 'ca' not in m.dataset
assert m.dataset['ce_data'].shape == (22033,4)
"""
Explanation: Each item in the ordering is a tuple, with a label and one or more regular expressions, which will be compared against all the parameter names. Any names that match will be pulled out and put into the reporting order sequentially. Thus if a parameter name would match more than one regex, it will appear in the ordering only for the first match.
Having created this model, we can then estimate it:
End of explanation
"""
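# The ordering groups above are driven by ordinary regular expressions matched against
# the parameter names; a tiny standalone illustration with plain re and a few of the
# parameter names used above (no larch machinery involved):
import re
example_names = ['wkcbd_Bike', 'wkempden_Bike', 'vehbywrk_SR', 'ASC_Bike']
print([n for n in example_names if re.match('wkcbd.*', n)])   # ['wkcbd_Bike']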
Yu-Group/scikit-learn-sandbox
jupyter/backup_deprecated_nbs/14_Check_Utils_pep8.ipynb
mit
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
import numpy as np
from functools import reduce

# Import our custom utilities
from imp import reload
from utils import irf_jupyter_utils
from utils import irf_utils
reload(irf_jupyter_utils)
reload(irf_utils)
"""
Explanation: Key Requirements for the iRF scikit-learn implementation
The following is a documentation of the main requirements for the iRF implementation
Typical Setup
End of explanation
"""

%timeit X_train, X_test, y_train, y_test, rf = irf_jupyter_utils.generate_rf_example(sklearn_ds = load_breast_cancer())
"""
Explanation: Step 1: Fit the Initial Random Forest
Just fit every feature with equal weights per the usual random forest code e.g. RandomForestClassifier in scikit-learn
End of explanation
"""

print("Training feature dimensions", X_train.shape, sep = ":\n")
print("\n")
print("Training outcome dimensions", y_train.shape, sep = ":\n")
print("\n")
print("Test feature dimensions", X_test.shape, sep = ":\n")
print("\n")
print("Test outcome dimensions", y_test.shape, sep = ":\n")
print("\n")
print("first 5 rows of the training set features", X_train[:5], sep = ":\n")
print("\n")
print("first 5 rows of the training set outcomes", y_train[:5], sep = ":\n")

X_train.shape[0]

breast_cancer = load_breast_cancer()
breast_cancer.data.shape[0]
"""
Explanation: Check out the data
End of explanation
"""

# Import our custom utilities
rf.n_estimators

estimator0 = rf.estimators_[0] # First tree
estimator1 = rf.estimators_[1] # Second tree
estimator2 = rf.estimators_[2] # Third tree
"""
Explanation: Step 2: For each Tree get core leaf node features
For each decision tree in the classifier, get:
The list of leaf nodes
Depth of the leaf node
Leaf node predicted class i.e. {0, 1}
Probability of predicting class in leaf node
Number of observations in the leaf node i.e. weight of node
Get the Decision trees to use for testing
End of explanation
"""

tree_dat0 = irf_utils.get_tree_data(X_train = X_train, dtree = estimator0, root_node_id = 0)
tree_dat1 = irf_utils.get_tree_data(X_train = X_train, dtree = estimator1, root_node_id = 0)
tree_dat2 = irf_utils.get_tree_data(X_train = X_train, dtree = estimator2, root_node_id = 0)
"""
Explanation: Design the single function to get the key tree information
Get data from the first and second decision tree
End of explanation
"""

# Now plot the trees individually
irf_jupyter_utils.draw_tree(decision_tree = estimator0)

irf_jupyter_utils.pretty_print_dict(inp_dict = tree_dat0)

# Count the number of samples passing through the leaf nodes
sum(tree_dat0['tot_leaf_node_values'])
"""
Explanation: Decision Tree 0 (First) - Get output
Check the output against the decision tree graph
End of explanation
"""

feature_importances = rf.feature_importances_

std = np.std([dtree.feature_importances_ for dtree in rf.estimators_]
             , axis=0)

feature_importances_rank_idx = np.argsort(feature_importances)[::-1]

# Check that the feature importances are standardized to 1
print(sum(feature_importances))
"""
Explanation: Step 3: Get the Gini Importance of Weights for the Random Forest
For the first random forest we just need to get the Gini Importance of Weights
Step 3.1 Get them numerically - most important
End of explanation
"""

# Print the feature ranking
print("Feature ranking:")

for f in range(X_train.shape[1]):
    print("%d. 
feature %d (%f)" % (f + 1 , feature_importances_rank_idx[f] , feature_importances[feature_importances_rank_idx[f]])) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(X_train.shape[1]) , feature_importances[feature_importances_rank_idx] , color="r" , yerr = std[feature_importances_rank_idx], align="center") plt.xticks(range(X_train.shape[1]), feature_importances_rank_idx) plt.xlim([-1, X_train.shape[1]]) plt.show() """ Explanation: Step 3.2 Display Feature Importances Graphically (just for interest) End of explanation """ # Import our custom utilities from imp import reload from utils import irf_jupyter_utils from utils import irf_utils reload(irf_jupyter_utils) reload(irf_utils) rf.n_classes_ estimator0.n_classes_ type(rf).__name__ rf_metrics = irf_utils.get_validation_metrics(inp_class_reg_obj = rf, y_true = y_test, X_test = X_test) rf_metrics['confusion_matrix'] # CHECK: If the random forest objects are going to be really large in size # we could just omit them and only return our custom summary outputs rf_metrics = irf_utils.get_validation_metrics(inp_class_reg_obj = rf, y_true = y_test, X_test = X_test) all_rf_outputs = {"rf_obj" : rf, "feature_importances" : feature_importances, "feature_importances_rank_idx" : feature_importances_rank_idx, "rf_metrics" : rf_metrics} # CHECK: The following should be paralellized! # CHECK: Whether we can maintain X_train correctly as required for idx, dtree in enumerate(rf.estimators_): dtree_out = irf_utils.get_tree_data(X_train = X_train, dtree = dtree, root_node_id = 0) # Append output to dictionary all_rf_outputs["dtree" + str(idx)] = dtree_out estimator0_out = irf_utils.get_tree_data(X_train=X_train, dtree=estimator0, root_node_id=0) print(estimator0_out['all_leaf_nodes']) """ Explanation: Putting it all together Create a dictionary object to include all of the random forest objects End of explanation """ print(estimator0_out['all_leaf_nodes']) print(sum(estimator0_out['tot_leaf_node_values'])) print(estimator0_out['tot_leaf_node_values']) print(estimator0_out['all_leaf_node_samples']) print(estimator0.tree_.n_node_samples[0]) print([round(i, 1) for i in estimator0_out['all_leaf_node_samples_percent']]) print(sum(estimator0_out['all_leaf_node_samples_percent'])) """ Explanation: Examine Individual Decision Tree Output End of explanation """ irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_outputs) """ Explanation: Check the final dictionary of outputs End of explanation """ irf_jupyter_utils.pretty_print_dict(inp_dict = all_rf_outputs['rf_metrics']) all_rf_outputs['dtree0'] """ Explanation: Now we can start setting up the RIT class Overview At it's core, the RIT is comprised of 3 main modules * FILTERING: Subsetting to either the 1's or the 0's * RANDOM SAMPLING: The path-nodes in a weighted manner, with/ without replacement, within tree/ outside tree * INTERSECTION: Intersecting the selected node paths in a systematic manner For now we will just work with a single decision tree outputs End of explanation """ uniq_feature_paths = all_rf_outputs['dtree0']['all_uniq_leaf_paths_features'] leaf_node_classes = all_rf_outputs['dtree0']['all_leaf_node_classes'] ones_only = [i for i, j in zip(uniq_feature_paths, leaf_node_classes) if j == 1] ones_only print("Number of leaf nodes", len(all_rf_outputs['dtree0']['all_uniq_leaf_paths_features']), sep = ":\n") print("Number of leaf nodes with 1 class", len(ones_only), sep = ":\n") # Just pick the last seven cases, we are going to manually construct # 
binary RIT of depth 3 i.e. max 2**3 -1 = 7 intersecting nodes ones_only_seven = ones_only[-7:] ones_only_seven # Construct a binary version of the RIT manually! # This should come in useful for unit tests! node0 = ones_only_seven[-1] node1 = np.intersect1d(node0, ones_only_seven[-2]) node2 = np.intersect1d(node1, ones_only_seven[-3]) node3 = np.intersect1d(node1, ones_only_seven[-4]) node4 = np.intersect1d(node0, ones_only_seven[-5]) node5 = np.intersect1d(node4, ones_only_seven[-6]) node6 = np.intersect1d(node4, ones_only_seven[-7]) intersected_nodes_seven = [node0, node1, node2, node3, node4, node5, node6] for idx, node in enumerate(intersected_nodes_seven): print("node" + str(idx), node) rit_output = reduce(np.union1d, (node2, node3, node5, node6)) rit_output from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier raw_data = load_breast_cancer() X_train, X_test, y_train, y_test = train_test_split( raw_data.data, raw_data.target, train_size=0.9, random_state=2017) rf = RandomForestClassifier( n_estimators=3, random_state=2018) rf.fit(X=X_train, y=y_train) estimator0 = rf.estimators_[0] estimator0_out = irf_utils.get_tree_data(X_train=X_train, dtree=estimator0, root_node_id=0) print(estimator0_out['all_leaf_nodes']) """ Explanation: Get the leaf node 1's paths Get the unique feature paths where the leaf node predicted class is just 1 End of explanation """
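# The hand-built tree above is just repeated intersection of feature paths, so the same
# idea can be written as a loop; a minimal numpy-only sketch (intersect_random_paths is
# a hypothetical helper, not part of irf_utils) that intersects three randomly chosen
# paths from the class-1 leaf nodes:
def intersect_random_paths(paths, n_paths=3, seed=2017):
    rng = np.random.RandomState(seed)
    chosen = [paths[i] for i in rng.choice(len(paths), size=n_paths, replace=False)]
    return reduce(np.intersect1d, chosen)

print(intersect_random_paths(ones_only_seven))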
USCDataScience/parser-indexer-py
notebooks/all-ner/MTE_NER.ipynb
apache-2.0
%load_ext autoreload
%autoreload 2
%matplotlib inline

from snorkel import SnorkelSession
import os
import numpy as np
import re, string
import codecs

# Open Session
session = SnorkelSession()
"""
Explanation: NER using Data Programming
Project Mars Target Encyclopedia
This notebook does not explain much; however, explanations can be found in the original notebook(s) at https://github.com/HazyResearch/snorkel/tree/master/tutorials/intro
Setup:
Follow the instructions in https://github.com/HazyResearch/snorkel
Start the jupyter notebook server using ./run.sh as described in the snorkel README
Copy this notebook to a place accessible from the jupyter server started in the previous step; you may symlink your directory
End of explanation
"""
to snorkel db End of explanation """ def load_paths(fp): with open(fp) as fp: return set(map(lambda x: x.strip().split(',')[0], fp.readlines())) train_files = load_paths(traing_list_file) dev_files = load_paths(dev_list_file) test_files = load_paths(test_list_file) splits = [train_files, dev_files, test_files] print("Docs:: Training size:", len(train_files), "Dev Size:", len(dev_files), "Test Size", len(test_files)) from snorkel.models import Document docs = session.query(Document).order_by(Document.name).all() train_sents = set() dev_sents = set() test_sents = set() for i, doc in enumerate(docs): fp = doc.meta['file_path'] group_name = [] for j, split in enumerate(splits): group_name.append('1' if fp in split else '0') group_name = ''.join(group_name) if group_name == '000': raise Exception("Document %s is not part of any split" % doc.name ) elif group_name == '100': group = train_sents elif group_name == '010': group = dev_sents elif group_name == '001': group = test_sents else: raise Exception("Document %s is in multiple splits %s" % (doc.name, group_name)) for s in doc.sentences: if number_of_nouns(s) > 0: # atleast one name in sentence group.add(s) print("Sentence:: Training size:", len(train_sents), "Dev Size:", len(dev_sents), "Test Size", len(test_sents)) """ Explanation: Split the corpus into train, development and testing Here we use the same split we used for previous setup with CoreNLP CRF classifier End of explanation """ dataset = train_sents element_cand_extr.apply(dataset, split=0, clear=True) mineral_cand_extr.apply(dataset, split=0, clear=False) target_cand_extr.apply(dataset, split=0, clear=False) train_elements = session.query(Element).filter(Element.split == 0).all() print "Number of candidate elements:", len(train_elements) train_minerals = session.query(Mineral).filter(Mineral.split == 0).all() print "Number of candidate Minerals:", len(train_minerals) train_targets = session.query(Target).filter(Target.split == 0).all() print "Number of candidate targets:", len(train_targets) """ Explanation: Extract candidates Here recall should be high, precison can be bad NOTE: Dont run this second time... Use the next cell to resume End of explanation """ for i, sents in enumerate([dev_sents, test_sents]): element_cand_extr.apply(sents, split=i+1, clear=True) mineral_cand_extr.apply(sents, split=i+1, clear=False) target_cand_extr.apply(sents, split=i+1, clear=False) print "Number of Elements:", session.query(Element).filter(Element.split == i+1).count() print "Number of Minerals:", session.query(Mineral).filter(Mineral.split == i+1).count() print "Number of Targets:", session.query(Target).filter(Target.split == i+1).count() """ Explanation: Extract Develop and Test Sets NOTE: DO not run this second time... End of explanation """ def load_set(path, lower=True): with codecs.open(path, 'r', 'utf-8') as f: lines = f.readlines() lines = map(lambda x: x.strip(), lines) lines = filter(lambda x: x and not x.startswith('#'), lines) if lower: lines = map(lambda x: x.lower(), lines) return set(lines) mte_targets = load_set("/Users/thammegr/work/mte/git/ref/MER-targets-pruned.txt", lower=False) print("Found %d target names in MTE dictionary" % len(mte_targets)) mte_targets = set(map(lambda x: x.replace('_', ' ').title(), mte_targets)) ## def LF_mte_targets_dict(c): return 1 if c.name.get_span().title() in mte_targets else -1 """ Explanation: Labelling Functions Targets 1. 
From a known dictionary End of explanation """ from lxml import etree # lxml supports XPath 1.0 which doesn't've regex match, so extending it ns = etree.FunctionNamespace(None) ns['matches'] = lambda _, val, patrn: re.match(patrn, str(val[0]) if val else "") is not None import requests mars_rocks_page = "https://en.wikipedia.org/wiki/List_of_rocks_on_Mars" tree = etree.HTML(requests.get(mars_rocks_page).text) names = tree.xpath('//h2[matches(span/@id, "^[0-9]{4}_.*")]/following-sibling::div[2]/ul/li//text()') names = map(lambda x: re.sub("\(.*\)", "", x), names) # remove explainations in () names = map(lambda x: re.sub(r'[^\w\s]','', x), names) # remove punctuations names = map(lambda x: x.strip(), names) # remove whitespaces names = filter(lambda x: re.match("^\d+$", x) is None, names) # remove the number citations which were [num] originally names = filter(lambda x: x and x[0].isupper(), names) # name should start with capital letter names = map(lambda x: x.title(), names) # map to title case wikipedia_targets = set(names) print("Found %d target names on wikipedia %s" %(len(wikipedia_targets), mars_rocks_page)) def LF_wikip_targets_dict(c): return 1 if c.name.get_span().title() in wikipedia_targets else 0 # this list is not exhaustive, so return 0 for missing # Debugging label functions from pprint import pprint labeled = [] for c in session.query(Target).filter(Target.split == 0).all(): if LF_wikip_targets_dict(c) != 0: # function labeled.append(c) print "Number labeled:", len(labeled) # Sample labeled labeled[:10] LFs = [ LF_mte_targets_dict, LF_wikip_targets_dict ] print("We have %d labeling functions" % len(LFs)) from snorkel.annotations import LabelAnnotator import numpy as np labeler = LabelAnnotator(f=LFs) #Let us label the training set np.random.seed(1701) %time L_train = labeler.apply(split=0) L_train # Loading it again -- resume from here L_train = labeler.load_matrix(session, split=0) L_train L_train.get_candidate(session, 10) # Get stats of LFs L_train.lf_stats(session, ) """ Explanation: 2. 
From wikipedia page End of explanation """ from snorkel.learning import GenerativeModel gen_model = GenerativeModel() gen_model.train(L_train, epochs=500, decay=0.95, step_size=0.1/L_train.shape[0], reg_param=1e-6) train_marginals = gen_model.marginals(L_train) # visualize import matplotlib.pyplot as plt plt.hist(train_marginals, bins=20) plt.show() gen_model.weights.lf_accuracy() L_dev = labeler.apply_existing(split=1) L_dev # development split dev_cands = session.query(Target).filter(Target.split == 1).all() len(dev_cands) dev_cands[:10]f from snorkel.viewer import SentenceNgramViewer sv = SentenceNgramViewer(dev_cands, session) sv from snorkel.annotations import load_gold_labels L_gold_dev = load_gold_labels(session, annotator_name=os.environ['USER'], split=1) L_gold_dev type(L_gold_dev) from snorkel.annotations import (csr_LabelMatrix, load_matrix, GoldLabelKey, GoldLabel) from snorkel.models import StableLabel from snorkel.db_helpers import reload_annotator_labels # NOTE: this is a shortcut for labeling # Ideally we should use labels from the SentenceNgramViewer true_labeller = LF_mte_targets_dict def load_gold_labels(cand_set, candidate_class, annotator_name="gold"): count = 0 for cand in cand_set: ctx_stable_ids = cand.name.get_span() query = session.query(StableLabel).filter(StableLabel.context_stable_ids == ctx_stable_ids) query = query.filter(StableLabel.annotator_name == annotator_name) if query.count() == 0: count += 1 true_label = true_labeller(cand) session.add(StableLabel( context_stable_ids=ctx_stable_ids, annotator_name=annotator_name, value=true_label)) # Commit session session.commit() # Reload annotator labels reload_annotator_labels(session, candidate_class, annotator_name, split=1, filter_label_split=False) reload_annotator_labels(session, candidate_class, annotator_name, split=2, filter_label_split=False) load_gold_labels(dev_cands, Target) from snorkel.annotations import load_gold_labels L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1) tot = L_gold_dev.shape[0] n = len(filter(lambda x: x == 1, L_gold_dev)) print("Found %d positive labels out of %d" % (n, tot)) tp, fp, tn, fn = gen_model.score(session, L_dev, L_gold_dev) dev_cands[0].name.get_span() """ Explanation: Generative model End of explanation """
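# A quick, hedged summary of the generative model's dev-set performance based on the
# tp/fp/tn/fn returned by gen_model.score above. This is only an illustrative sketch:
# depending on the snorkel version these may be candidate sets or plain counts, so we
# normalize defensively before computing precision and recall.
def _count(x):
    return len(x) if hasattr(x, '__len__') else int(x)

n_tp, n_fp, n_fn = _count(tp), _count(fp), _count(fn)
precision = float(n_tp) / (n_tp + n_fp) if (n_tp + n_fp) else 0.0
recall = float(n_tp) / (n_tp + n_fn) if (n_tp + n_fn) else 0.0
print("Dev-set precision: %.3f, recall: %.3f" % (precision, recall))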
SunPower/pvfactors
docs/tutorials/Account_for_AOI_losses.ipynb
bsd-3-clause
# Import external libraries import os import numpy as np import matplotlib.pyplot as plt from datetime import datetime import pandas as pd import warnings # Settings %matplotlib inline np.set_printoptions(precision=3, linewidth=300) warnings.filterwarnings('ignore') plt.style.use('seaborn-whitegrid') plt.rcParams.update({'font.size': 12}) # Paths LOCAL_DIR = os.getcwd() DATA_DIR = os.path.join(LOCAL_DIR, 'data') filepath = os.path.join(DATA_DIR, 'test_df_inputs_MET_clearsky_tucson.csv') RUN_FIXED_TILT = True """ Explanation: Account for AOI reflection losses (in full mode only) In this section, we will learn: how pvfactors accounts for AOI losses by default how to account for AOI-dependent reflection losses for direct, circumsolar, and horizon irradiance components how to account for AOI-dependent reflection losses for isotropic and reflection irradiance components how to run all of this using the pvfactors run functions Imports and settings End of explanation """ # Helper functions for plotting and simulation def plot_irradiance(df_report): # Plot irradiance f, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) # Plot back surface irradiance df_report[['qinc_back', 'qabs_back']].plot(ax=ax[0]) ax[0].set_title('Back surface irradiance') ax[0].set_ylabel('W/m2') # Plot front surface irradiance df_report[['qinc_front', 'qabs_front']].plot(ax=ax[1]) ax[1].set_title('Front surface irradiance') ax[1].set_ylabel('W/m2') plt.show() def plot_aoi_losses(df_report): # plotting AOI losses f, ax = plt.subplots(figsize=(5.5, 4)) df_report[['aoi_losses_back_%']].plot(ax=ax) df_report[['aoi_losses_front_%']].plot(ax=ax) # Adjust axes ax.set_ylabel('%') ax.legend(['AOI losses back PV row', 'AOI losses front PV row']) ax.set_title('AOI losses') plt.show() # Create a function that will build a simulation report def fn_report(pvarray): # Get irradiance values report = {'qinc_back': pvarray.ts_pvrows[1].back.get_param_weighted('qinc'), 'qabs_back': pvarray.ts_pvrows[1].back.get_param_weighted('qabs'), 'qinc_front': pvarray.ts_pvrows[1].front.get_param_weighted('qinc'), 'qabs_front': pvarray.ts_pvrows[1].front.get_param_weighted('qabs')} # Calculate AOI losses report['aoi_losses_back_%'] = (report['qinc_back'] - report['qabs_back']) / report['qinc_back'] * 100. report['aoi_losses_front_%'] = (report['qinc_front'] - report['qabs_front']) / report['qinc_front'] * 100. 
# Return report return report """ Explanation: Let's define a few helper functions that will help clarify the notebook End of explanation """ def export_data(fp): tz = 'US/Arizona' df = pd.read_csv(fp, index_col=0) df.index = pd.DatetimeIndex(df.index).tz_convert(tz) return df df = export_data(filepath) df_inputs = df.iloc[:48, :] # Plot the data f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3)) df_inputs[['dni', 'dhi']].plot(ax=ax1) df_inputs[['solar_zenith', 'solar_azimuth']].plot(ax=ax2) df_inputs[['surface_tilt', 'surface_azimuth']].plot(ax=ax3) plt.show() # Use a fixed albedo albedo = 0.2 """ Explanation: Get timeseries inputs End of explanation """ pvarray_parameters = { 'n_pvrows': 3, # number of pv rows 'pvrow_height': 1, # height of pvrows (measured at center / torque tube) 'pvrow_width': 1, # width of pvrows 'axis_azimuth': 0., # azimuth angle of rotation axis 'gcr': 0.4, # ground coverage ratio } """ Explanation: Prepare PV array parameters End of explanation """ from pvfactors.geometry import OrderedPVArray # Create PV array pvarray = OrderedPVArray.init_from_dict(pvarray_parameters) from pvfactors.engine import PVEngine from pvfactors.irradiance import HybridPerezOrdered # Create irradiance model irradiance_model = HybridPerezOrdered(rho_front=0.03, rho_back=0.05) # Create engine engine = PVEngine(pvarray, irradiance_model=irradiance_model) # Fit engine to data engine.fit(df_inputs.index, df_inputs.dni, df_inputs.dhi, df_inputs.solar_zenith, df_inputs.solar_azimuth, df_inputs.surface_tilt, df_inputs.surface_azimuth, albedo) # Plot pvarray shapely geometries f, ax = plt.subplots(figsize=(8, 4)) pvarray.plot_at_idx(12, ax) plt.title('Modeled PV array at {}'.format(df_inputs.index[12])) plt.show() # Run full mode simulation report = engine.run_full_mode(fn_build_report=fn_report) # Turn report into dataframe df_report = pd.DataFrame(report, index=df_inputs.index) plot_irradiance(df_report) """ Explanation: Default AOI loss behavior In pvfactors: qinc is the total incident irradiance on a surface, and it does not account for reflection losses but qabs, which is the total absorbed irradiance by a surface, does accounts for it. By default, pvfactors assumes that all reflection losses (or AOI losses) are diffuse; i.e. they do not depend on angle of incidence (AOI). Here is an example. Let's run a full mode simulation (reflection equilibrium) and compare the calculated incident and absorbed irradiance on both sides of a PV row in a modeled PV array. We'll use 3% reflection for PV row front surfaces, and 5% for the back surfaces. 
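Because the reflectivities are constant here, the absorbed irradiance is simply qabs = qinc * (1 - rho), so the AOI-loss percentages in the report should stay flat at rho * 100 for every timestep. A minimal sanity check along these lines could look like the sketch below (it assumes the df_report dataframe produced by the full-mode run, with numpy available as np):
```python
# constant reflectivities => flat AOI losses of 3% (front) and 5% (back)
assert np.allclose(df_report['aoi_losses_front_%'].dropna(), 3.0)
assert np.allclose(df_report['aoi_losses_back_%'].dropna(), 5.0)
```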
End of explanation """ plot_aoi_losses(df_report) """ Explanation: Let's plot the back AOI losses End of explanation """ # import utility function from pvfactors.viewfactors.aoimethods import faoi_fn_from_pvlib_sandia # Choose a module name module_name = 'SunPower_128_Cell_Module___2009_' # Create an faoi function faoi_function = faoi_fn_from_pvlib_sandia(module_name) # Plot faoi function values aoi_values = np.linspace(0, 180, 100) faoi_values = faoi_function(aoi_values) f, ax = plt.subplots() ax.plot(aoi_values, faoi_values) ax.set_title('fAOI values for pvlib\'s {}'.format(module_name)) ax.set_ylabel('fAOI values') ax.set_xlabel('AOI angles measured from "horizontal" [deg]') plt.show() """ Explanation: As shown above, by default pvfactors apply constant values of AOI losses for all the surfaces in the system, and for all the incident irradiance components: 3% loss for the irradiance incident on front of PV rows, which corresponds to the chosen rho_front in the irradiance model 5% loss for the irradiance incident on back of PV rows, which corresponds to the chosen rho_back in the irradiance model Use an fAOI function in the irradiance model The next step that can improve the AOI loss calculation, especially for the PV row front surface that receives a lot of direct light, would be to use reflection losses that would be dependent on the AOI, and that would be applied to all the irradiance model components: direct, circumsolar, and horizon light components. What is an fAOI function? The fAOI function that the users need to provide takes an angle of incidence as input (AOI measured in degrees and against the surface horizontal - from 0 to 180 deg, not against the surface normal vector - which would have been from 0 to 90 deg), and it returns a transmission value for the incident light. So it's effectively a factor that removes reflection losses. Let's see what this looks like. First, let's create such a function using a pvfactors utility function, and then we'll plot it. Given a pvlib module database name, you can create an fAOI function as follows using pvfactors. End of explanation """ # Create irradiance model with fAOI function irradiance_model = HybridPerezOrdered(faoi_fn_front=faoi_function, faoi_fn_back=faoi_function) """ Explanation: As expected, there are less reflection losses for incident light rays normal to the surface than everywhere else. Use the fAOI function It's then easy to use the created fAOI function in the irradiance models. It just has to be passed to the model at initialization. For this example, we will use the same fAOI function for the front and back surfaces of the PV rows. End of explanation """ # Create engine engine = PVEngine(pvarray, irradiance_model=irradiance_model) # Fit engine to data engine.fit(df_inputs.index, df_inputs.dni, df_inputs.dhi, df_inputs.solar_zenith, df_inputs.solar_azimuth, df_inputs.surface_tilt, df_inputs.surface_azimuth, albedo) # Run full mode simulation report = engine.run_full_mode(fn_build_report=fn_report) # Turn report into dataframe df_report = pd.DataFrame(report, index=df_inputs.index) """ Explanation: Then pass the model to the PVEngine and run the simulation as usual. End of explanation """ plot_irradiance(df_report) plot_aoi_losses(df_report) """ Explanation: Let's now see what the irradiance and AOI losses look like. 
End of explanation
"""
plot_irradiance(df_report)
plot_aoi_losses(df_report)
"""
Explanation: We can now see the changes in AOI losses, which now use the fAOI function for the direct, circumsolar, and horizon light components. But it still uses the constant rho_front and rho_back values for the reflection and isotropic components of the incident light on the surfaces.
Advanced: use an fAOI function for the (ground and array) reflection and isotropic components
The more advanced use is to apply the fAOI losses to the reflection and isotropic components of the light incident on the PV row surfaces. In order to do so, you simply need to pass the fAOI function to the view factor calculator before initializing the PVEngine. In this case, the simulation workflow will be as follows:
the PVEngine will still calculate the equilibrium of reflections assuming diffuse surfaces and constant reflection losses
it will then use the calculated radiosity values and apply the fAOI losses using an integral combining the AOI losses and the view factor integrands, as described in the theory section, and similarly to Marion, B., et al (2017)
A word of caution
Users should be careful when using fAOI losses with the view factor calculator, for the following reasons:
in order to be fully consistent in the PVEngine calculations, it is wiser to re-calculate a global hemispherical reflectivity value using the fAOI function, which will be used in the reflection equilibrium calculation
the method used for accounting for fAOI losses in reflections is physically valid only if the surfaces are "infinitesimal", because it uses view factor formulas that are only valid in this case (see http://www.thermalradiation.net/sectionb/B-71.html). So in order to make it work in pvfactors, you'll need to discretize the PV row sides into smaller segments
the method relies on the numerical calculation of an integral, and that calculation will converge only given a sufficient number of integral points (which can be provided to the pvfactors view factor calculator). Marion, B., et al (2017) appear to use 180 points, but in pvfactors' implementation that does not seem to be enough for the integral to converge, so we'll use 1000 integral points in this example
the two points above slow down the computation time by an order of magnitude.
8760 simulations that normally take a couple of seconds to run with pvfactors's full mode can then take up to a minute Apply fAOI losses to reflection terms Discretize the PV row sides of the PV array: End of explanation """ from pvfactors.viewfactors import VFCalculator vf_calculator = VFCalculator(faoi_fn_front=faoi_function, faoi_fn_back=faoi_function, n_aoi_integral_sections=1000) """ Explanation: Add fAOI losses to the view factor calculator, and use 1000 integration points End of explanation """ # For back PV row surface is_back = True rho_back = vf_calculator.vf_aoi_methods.rho_from_faoi_fn(is_back) # For front PV row surface is_back = False rho_front = vf_calculator.vf_aoi_methods.rho_from_faoi_fn(is_back) # Print results print('Reflectivity values for front side: {}, and back side: {}'.format(rho_front, rho_back)) """ Explanation: Re-calculate global hemispherical reflectivity values based on fAOI function End of explanation """ irradiance_model = HybridPerezOrdered(rho_front=rho_front, rho_back=rho_back, faoi_fn_front=faoi_function, faoi_fn_back=faoi_function) """ Explanation: Since we're using the same fAOI function for front and back sides, we now get the same global hemispherical reflectivity values. We can now create the irradiance model. End of explanation """ # Create engine engine = PVEngine(pvarray, vf_calculator=vf_calculator, irradiance_model=irradiance_model) # Fit engine to data engine.fit(df_inputs.index, df_inputs.dni, df_inputs.dhi, df_inputs.solar_zenith, df_inputs.solar_azimuth, df_inputs.surface_tilt, df_inputs.surface_azimuth, albedo) # Plot pvarray shapely geometries f, ax = plt.subplots(figsize=(8, 4)) ax = pvarray.plot_at_idx(12, ax, with_surface_index=True) plt.title('Modeled PV array at {}'.format(df_inputs.index[14])) plt.show() """ Explanation: Simulations can then be run the usual way: End of explanation """ # Run full mode simulation report = engine.run_full_mode(fn_build_report=fn_report) # Turn report into dataframe df_report = pd.DataFrame(report, index=df_inputs.index) """ Explanation: Run the simulation: End of explanation """ plot_irradiance(df_report) plot_aoi_losses(df_report) """ Explanation: Let's now see what the irradiance and AOI losses look like. End of explanation """ # Define the parameters for the irradiance model and the view factor calculator irradiance_params = {'rho_front': rho_front, 'rho_back': rho_back, 'faoi_fn_front': faoi_function, 'faoi_fn_back': faoi_function} vf_calculator_params = {'faoi_fn_front': faoi_function, 'faoi_fn_back': faoi_function, 'n_aoi_integral_sections': 1000} """ Explanation: This is the way to apply fAOI losses to all the irradiance components in a pvfactors simulation. Doing all of the above using the "run functions" When using the "run functions", you'll just need to define the parameters in advance and then pass it to the functions. 
End of explanation """ from pvfactors.run import run_timeseries_engine # run simulations in parallel mode report_from_fn = run_timeseries_engine(fn_report, pvarray_parameters, df_inputs.index, df_inputs.dni, df_inputs.dhi, df_inputs.solar_zenith, df_inputs.solar_azimuth, df_inputs.surface_tilt, df_inputs.surface_azimuth, albedo, irradiance_model_params=irradiance_params, vf_calculator_params=vf_calculator_params) # Turn report into dataframe df_report_from_fn = pd.DataFrame(report_from_fn, index=df_inputs.index) plot_irradiance(df_report_from_fn) plot_aoi_losses(df_report_from_fn) """ Explanation: Using run_timeseries_engine() End of explanation """ class ReportBuilder(object): """Class for building the reports with multiprocessing""" @staticmethod def build(pvarray): pvrow = pvarray.ts_pvrows[1] report = {'qinc_front': pvrow.front.get_param_weighted('qinc'), 'qabs_front': pvrow.front.get_param_weighted('qabs'), 'qinc_back': pvrow.back.get_param_weighted('qinc'), 'qabs_back': pvrow.back.get_param_weighted('qabs')} # Calculate AOI losses report['aoi_losses_back_%'] = (report['qinc_back'] - report['qabs_back']) / report['qinc_back'] * 100. report['aoi_losses_front_%'] = (report['qinc_front'] - report['qabs_front']) / report['qinc_front'] * 100. # Return report return report @staticmethod def merge(reports): report = reports[0] keys = report.keys() for other_report in reports[1:]: for key in keys: report[key] = list(report[key]) report[key] += list(other_report[key]) return report class FaoiClass(object): """Class for passing the faoi function to engine""" @staticmethod def faoi(*args, **kwargs): fn = faoi_fn_from_pvlib_sandia(module_name) return fn(*args, **kwargs) """ Explanation: Using run_parallel_engine() Because of Python's multiprocessing, and because functions cannot be pickled in Python, the functions need to be wrapped up into classes. End of explanation """ # Define the parameters for the irradiance model and the view factor calculator irradiance_params = {'rho_front': rho_front, 'rho_back': rho_back, 'faoi_fn_front': FaoiClass, 'faoi_fn_back': FaoiClass} vf_calculator_params = {'faoi_fn_front': FaoiClass, 'faoi_fn_back': FaoiClass, 'n_aoi_integral_sections': 1000} from pvfactors.run import run_parallel_engine # run simulations in parallel mode report_from_fn = run_parallel_engine(ReportBuilder, pvarray_parameters, df_inputs.index, df_inputs.dni, df_inputs.dhi, df_inputs.solar_zenith, df_inputs.solar_azimuth, df_inputs.surface_tilt, df_inputs.surface_azimuth, albedo, irradiance_model_params=irradiance_params, vf_calculator_params=vf_calculator_params) # Turn report into dataframe df_report_from_fn = pd.DataFrame(report_from_fn, index=df_inputs.index) plot_irradiance(df_report_from_fn) plot_aoi_losses(df_report_from_fn) """ Explanation: Pass the objects through the dictionaries and run the simulation End of explanation """
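# Optional post-processing sketch (not part of the original tutorial steps): approximate
# the daily back-side energy from the parallel-run report, assuming hourly timesteps so
# that summing W/m2 values gives Wh/m2, and that df_report_from_fn has a DatetimeIndex.
daily_kwh = df_report_from_fn[['qinc_back', 'qabs_back']].resample('D').sum() / 1000.0
print(daily_kwh.head())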
turbomanage/training-data-analyst
courses/machine_learning/deepdive/04_features/labs/a_features.ipynb
apache-2.0
import math import shutil import numpy as np import pandas as pd import tensorflow as tf print(tf.__version__) tf.logging.set_verbosity(tf.logging.INFO) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format """ Explanation: Trying out features Learning Objectives: * Improve the accuracy of a model by adding new features with the appropriate representation The data is based on 1990 census data from California. This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Set Up In this first cell, we'll load the necessary libraries. End of explanation """ df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",") """ Explanation: Next, we'll load our data set. End of explanation """ df.head() df.describe() """ Explanation: Examine and split the data It's a good idea to get to know your data a little bit before you work with it. We'll print out a quick summary of a few useful statistics on each column. This will include things like mean, standard deviation, max, min, and various quantiles. End of explanation """ np.random.seed(seed=1) #makes result reproducible msk = np.random.rand(len(df)) < 0.8 traindf = df[msk] evaldf = df[~msk] """ Explanation: Now, split the data into two parts -- training and evaluation. End of explanation """ def add_more_features(df): # TODO: Add more features to the dataframe return df # Create pandas input function def make_input_fn(df, num_epochs): return tf.estimator.inputs.pandas_input_fn( x = add_more_features(df), y = df['median_house_value'] / 100000, # will talk about why later in the course batch_size = 128, num_epochs = num_epochs, shuffle = True, queue_capacity = 1000, num_threads = 1 ) # Define your feature columns def create_feature_cols(): return [ tf.feature_column.numeric_column('housing_median_age') # TODO: Define additional feature columns # Hint: Are there any features that would benefit from bucketizing? ] # Create estimator train and evaluate function def train_and_evaluate(output_dir, num_train_steps): # TODO: Create tf.estimator.LinearRegressor, train_spec, eval_spec, and train_and_evaluate using your feature columns OUTDIR = './trained_model' # Run the model shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file train_and_evaluate(OUTDIR, 2000) """ Explanation: Training and Evaluation In this exercise, we'll be trying to predict median_house_value It will be our label (sometimes also called a target). We'll modify the feature_cols and input function to represent the features you want to use. Hint: Some of the features in the dataframe aren't directly correlated with median_house_value (e.g. total_rooms) but can you think of a column to divide it by that we would expect to be correlated with median_house_value? End of explanation """
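# One possible feature along the lines of the hint above (illustrative only, not the
# official lab solution): total_rooms by itself is not very informative, but rooms per
# household tends to track median_house_value much better.
def add_more_features_example(df):  # hypothetical helper, separate from the lab's TODO
  df = df.copy()
  df['num_rooms'] = df['total_rooms'] / df['households']
  return df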
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/text_classification/labs/custom_tf_hub_word_embedding.ipynb
apache-2.0
!pip freeze | grep tensorflow-hub==0.7.0 || pip install tensorflow-hub==0.7.0 import os import tensorflow as tf import tensorflow_hub as hub """ Explanation: Custom TF-Hub Word Embedding with text2hub Learning Objectives: 1. Learn how to deploy AI Hub Kubeflow pipeline 1. Learn how to configure the run parameters for text2hub 1. Learn how to inspect text2hub generated artifacts and word embeddings in TensorBoard 1. Learn how to run TF 1.x generated hub module in TF 2.0 Introduction Pre-trained text embeddings such as TF-Hub modules are a great tool for building machine learning models for text features, since they capture relationships between words. These embeddings are generally trained on vast but generic text corpora like Wikipedia or Google News, which means that they are usually very good at representing generic text, but not so much when the text comes from a very specialized domain with unique vocabulary, such as in the medical field. One problem in particular that arises when applying a TF-Hub text module which was pre-trained on a generic corpus to specialized text is that all of the unique, domain-specific words will be mapped to the same “out-of-vocabulary” (OOV) vector. By doing so we lose a very valuable part of the text information, because for specialized texts the most informative words are often the words that are very specific to that special domain. Another issue is that of commonly misspelled words from text gathered from say, customer feedback. Applying a generic pre-trained embedding will send the misspelled word to the OOV vectors, losing precious information. However, by creating a TF-Hub module tailored to the texts coming from that customer feedback means that common misspellings present in your real customer data will be part of the embedding vocabulary and should be close by closeby to the original word in the embedding space. In this notebook, we will learn how to generate a text TF-hub module specific to a particular domain using the text2hub Kubeflow pipeline available on Google AI Hub. This pipeline takes as input a corpus of text stored in a GCS bucket and outputs a TF-Hub module to a GCS bucket. The generated TF-Hub module can then be reused both in TF 1.x or in TF 2.0 code by referencing the output GCS bucket path when loading the module. Our first order of business will be to learn how to deploy a Kubeflow pipeline, namely text2hub, stored in AI Hub to a Kubeflow cluster. Then we will dig into the pipeline run parameter configuration and review the artifacts produced by the pipeline during its run. These artifacts are meant to help you assess how good the domain specific TF-hub module you generated is. In particular, we will explore the embedding space visually using TensorBoard projector, which provides a tool to list the nearest neighbors to a given word in the embedding space. At last, we will explain how to run the generated module both in TF 1.x and TF 2.0. Running the module in TF 2.0 will necessite a small trick that’s useful to know in itself because it allows you to use all the TF 1.x modules in TF hub in TF 2.0 as a Keras layer. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook. 
End of explanation """ PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET NAME os.environ["PROJECT"] = PROJECT os.environ["BUCKET"] = BUCKET """ Explanation: Replace by your GCP project and bucket: End of explanation """ %%bash URL=http://www.gutenberg.org/cache/epub/24564/pg24564.txt OUTDIR=gs://$BUCKET/custom_embedding CORPUS=surgery_manual.txt curl $URL > $CORPUS gsutil cp $CORPUS $OUTDIR/$CORPUS """ Explanation: Setting up the Kubeflow cluster We assume that you have a running Kubeflow cluster. If not, to deploy a Kubeflow cluster in your GCP project, use the Kubeflow cluster deployer. There is a setup video that will take you over all the steps in detail, and explains how to access to the Kubeflow Dashboard UI, once it is running. You'll need to create an OAuth client for authentication purposes: Follow the instructions here. Loading the dataset in GCS The corpus we chose is one of Project Gutenberg medical texts: A Manual of the Operations of Surgery by Joseph Bell, containing very specialized language. The first thing to do is to upload the text into a GCS bucket: End of explanation """ !echo gs://$BUCKET/custom_embedding/surgery_manual.txt """ Explanation: It has very specialized language such as On the surface of the abdomen the position of this vessel would be indicated by a line drawn from about an inch on either side of the umbilicus to the middle of the space between the symphysis pubis and the crest of the ilium. Now let's go over the steps involved in creating your own embedding from that corpus. Lab Task 1: Complete Step 1 through Step 3 below to deploy an AI Hub Kubeflow pipeline. Step 1: Download the text2hub pipeline from AI Hub Go on AI Hub and search for the text2hub pipeline, or just follow this link. You'll land onto a page describing text2hub. Click on the "Download" button on that page to download the Kubeflow pipeline The text2hub pipeline is a KubeFlow pipeline that comprises three components; namely: The text2cooc component that computes a word co-occurrence matrix from a corpus of text The cooc2emb component that factorizes the co-occurrence matrix using Swivel into the word embeddings exported as a tsv file The emb2hub component that takes the word embedding file and generates a TF Hub module from it Each component is implemented as a Docker container image that's stored into Google Cloud Docker registry, gcr.io. The pipeline.tar.gz file that you downloaded is a yaml description of how these containers need to be composed as well as where to find the corresponding images. Remark: Each component can be run individually as a single component pipeline in exactly the same manner as the text2hub pipeline. On AI Hub, each component has a pipeline page describing it and from where you can download the associated single component pipeline: text2cooc cooc2emb emb2hub Step 2: Upload the pipeline to the Kubeflow cluster Go to your Kubeflow cluster dashboard and click on the pipeline tab to create a new pipeline. You'll be prompted to upload the pipeline file you have just downloaded. Rename the generated pipeline name to be text2hub to keep things nice and clean. Step 3: Create a pipeline run After uploading the pipeline, you should see text2hub appear on the pipeline list. Click on it. This will bring you to a page describing the pipeline (explore!) and allowing you to create a run. 
You can inspect the input and output parameters of each of the pipeline components by clicking on the component node in the graph representing the pipeline. Lab Task 2: Complete Step 4 below to configure the run parameters for text2hub. Step 4: Enter the run parameters text2hub has the following run parameters you can configure: Argument | Description | Optional | Data Type | Accepted values | Default ------------------------------------------------ | ------------------------------------------------------------------------------------- | -------- | --------- | --------------- | ------- gcs-path-to-the-text-corpus | A Cloud Storage location pattern (i.e., glob) where the text corpus will be read from | False | String | gs://... | - gcs-directory-path-for-pipeline-output | A Cloud Storage directory path where the pipeline output will be exported | False | String | gs://... | - number-of-epochs | Number of epochs to train the embedding algorithm (Swivel) on | True | Integer | - | 40 embedding-dimension | Number of components of the generated embedding vectors | True | Integer | - | 128 co-occurrence-word-window-size | Size of the sliding word window where co-occurrences are extracted from | True | Integer | - | 10 number-of-out-of-vocabulary-buckets | Number of out-of-vocabulary buckets | True | Integer | - | 1 minimum-occurrences-for-a-token-to-be-considered | Minimum number of occurrences for a token to be included in the vocabulary | True | Integer | - | 5 You can leave most parameters with their default values except for gcs-path-to-the-test-corpus whose value should be set to End of explanation """ !echo gs://$BUCKET/custom_embedding """ Explanation: and for gcs-directory-path-for-pipeline-output which we will set to End of explanation """ !echo tensorboard --port 8080 --logdir gs://$BUCKET/custom_embedding/embeddings """ Explanation: Remark: gcs-path-to-the-test-corpus will accept a GCS pattern like gs://BUCKET/data/*.txt or simply a path like gs://BUCKET/data/ to a GCS directory. All the files that match the pattern or that are in that directory will be parsed to create the word embedding TF-Hub module. Once these values have been set, you can start the run by clicking on "Start". Lab Task 3: Complete Step 5 below to use Tensorboard to inspect the text2hub generated artifacts and word embeddings. Step 5: Inspect the run artifacts Once the run has started you can see its state by going to the experiment tab and clicking on the name of the run (here "text2hub-1"). It will show you the pipeline graph. The components in green have successfuly completed. You can then click on them and look at the artifacts that these components have produced. The text2cooc components has "co-occurrence extraction summary" showing you the GCS path where the co-occurrence data has been saved. Their is a corresponding link that you can paste into your browser to inspect the co-occurrence data from the GCS browser. Some statistics about the vocabulary are given to you such as the most and least frequent tokens. You can also download the vocabulary file containing the token to be embedded. 
The cooc2emb has three artifacts * An "Embedding Extraction Summary" providing the information as where the model chekpoints and the embedding tables are exported to on GCP * A similarity matrix from a random sample of words giving you an indication whether the model associates close-by vectors to similar words * An button to start TensorBoard from the UI to inspect the model and visualize the word embeddings We can have a look at the word embedding visualization provided by TensorBoard. Start TensorBoard by clicking on "Start" and then "Open" buttons, and then select "Projector". Remark: The projector tab may take some time to appear. If it takes too long it may be that your Kubeflow cluster is running an incompatible version of TensorBoard (your TB version should be between 1.13 and 1.15). If that's the case, just run Tensorboard from CloudShell or locally by issuing the following command: End of explanation """ MODULE = "gs://{bucket}/custom_embedding/hub-module".format(bucket=BUCKET) MODULE """ Explanation: The projector view will present you with a representation of the word vectors in a 3 dimensional space (the dim is reduced through PCA) that you can interact with. Enter in the search tool a few words like "ilium" and points in the 3D space will light up. If you click on a word vector, you'll see appear the n nearest neighbors of that word in the embedding space. The nearset neighbors are both visualized in the center panel and presented as a flat list on the right. Explore the nearest neighbors of a given word and see if they kind of make sense. This will give you a rough understanding of the embedding quality. If it nearest neighbors do not make sense after trying for a few key words, you may need rerun text2hub, but this time with either more epochs or more data. Reducing the embedding dimension may help as well as modifying the co-occurence window size (choose a size that make sense given how your corpus is split into lines.) The emb2hub artifacts give you a snippet of TensorFlow 1.x code showing you how to re-use the generated TF-Hub module in your code. We will demonstrate how to use the TF-Hub module in TF 2.0 in the next section. Step 7: Using the generated TF-Hub module (TODO 4) Let's see now how to load the TF-Hub module generated by text2hub in TF 2.0. We first store the GCS bucket path where the TF-Hub module has been exported into a variable: End of explanation """ med_embed = # TODO: Your code goes here. """ Explanation: Now we are ready to create a KerasLayer out of our custom text embedding. Lab Task 4a: Complete the code below to create a KerasLayer using the MODULE defined above. End of explanation """ outputs = # TODO: Your code goes here. outputs """ Explanation: That layer when called with a list of sentences will create a sentence vector for each sentence by averaging the word vectors of the sentence. Lab Task 4b: Use the med_embed layer you created above to find text embeddings for the words/phrases 'ilium', 'I have a fracture', and 'aneurism'. End of explanation """
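# A hedged sketch of what the two TODOs above might look like (the graded solution may
# differ): wrap the generated module in a hub.KerasLayer and embed a few domain phrases.
# hub and tf are imported at the top of this notebook; MODULE is the GCS path defined above.
example_embed = hub.KerasLayer(MODULE)
example_outputs = example_embed(tf.constant(['ilium', 'I have a fracture', 'aneurism']))
print(example_outputs.shape)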
ANTsX/ANTsPy
docs/other/ANTsPy Tutorial.ipynb
apache-2.0
import ants import matplotlib.pyplot as plt %matplotlib inline img = ants.image_read( ants.get_ants_data('r16'), 'float' ) plt.imshow(img.numpy(), cmap='Greys_r') plt.show() mask = ants.get_mask(img) plt.imshow(mask.numpy()) plt.show() """ Explanation: ANTsPy Tutorial In this tutorial, I will show of some of the core ANTsPy functionality. I will highlight the similarities with ANTsR. Basic IO, Processing, & Plotting End of explanation """ img_n4 = ants.n4_bias_field_correction(img, shrink_factor=3) plt.imshow(img_n4.numpy(), cmap='Greys_r') plt.show() """ Explanation: N4 Bias Correction End of explanation """ diff = img - img_n4 plt.imshow(diff.numpy()) plt.show() """ Explanation: Overloaded Mathematical Operators End of explanation """ img = ants.image_read( ants.get_ants_data("r16") ).clone('float') img = ants.resample_image( img, (64,64), 1, 0 ) mask = ants.get_mask(img) segs1 = ants.atropos( a = img, m = '[0.2,1x1]', c = '[2,0]', i = 'kmeans[3]', x = mask ) print(segs1) for i in range(3): plt.imshow(segs1['probabilityimages'][i].numpy()) plt.title('Class %i' % i) plt.show() plt.imshow(segs1['segmentation'].numpy()) plt.show() """ Explanation: Atropos The following example has been validated with ANTsR. That is, both ANTsR and ANTsPy return the EXACT same result (images). R Version: R img &lt;- antsImageRead( getANTsRData("r16") , 2 ) img &lt;- resampleImage( img, c(64,64), 1, 0 ) mask &lt;- getMask(img) segs1 &lt;- atropos( a = img, m = '[0.2,1x1]', c = '[2,0]', i = 'kmeans[3]', x = mask ) End of explanation """ fi = ants.image_read( ants.get_ants_data('r16') ).clone('float') mi = ants.image_read( ants.get_ants_data('r64')).clone('float') fi = ants.resample_image(fi,(60,60),1,0) mi = ants.resample_image(mi,(60,60),1,0) mytx = ants.registration(fixed=fi, moving=mi, type_of_transform = 'SyN' ) print(mytx) plt.imshow(mi.numpy()) plt.title('Original moving image') plt.show() plt.imshow(fi.numpy()) plt.title('Original fixed image') plt.show() plt.imshow(mytx['warpedmovout'].numpy()) plt.title('Warped moving imag') plt.show() """ Explanation: Registration R Version: R fi &lt;- antsImageRead(getANTsRData("r16") ) mi &lt;- antsImageRead(getANTsRData("r64") ) fi&lt;-resampleImage(fi,c(60,60),1,0) mi&lt;-resampleImage(mi,c(60,60),1,0) # speed up mytx &lt;- antsRegistration(fixed=fi, moving=mi, typeofTransform = c('SyN') ) End of explanation """ import numpy as np import pandas as pd mat = pd.read_csv('~/desktop/mat.csv', index_col=0).values mat2 = pd.read_csv('~/desktop/mat2.csv', index_col=0).values mydecom = ants.sparseDecom2(inmatrix=(mat,mat2), sparseness=(0.1,0.3), nvecs=3, its=3, perms=0) print('Available Results: ', list(mydecom.keys())) print('Correlations: ', mydecom['corrs']) """ Explanation: SparseDecom2 Another ANTsR-validated result: R mat&lt;-replicate(100, rnorm(20)) mat2&lt;-replicate(100, rnorm(20)) mat&lt;-scale(mat) mat2&lt;-scale(mat2) mydecom&lt;-sparseDecom2(inmatrix = list(mat,mat2), sparseness=c(0.1,0.3), nvecs=3, its=3, perms=0) The 3 correlation values from that experiment are: [0.9762784, 0.9705170, 0.7937968] After saving those exact matrices, and running the ANTsPy version, we see that we get the exact same result End of explanation """
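# For reference, a Python sketch roughly equivalent to the R snippet quoted above that
# generated the saved CSVs (the numbers will differ because the random draws differ):
# 20 observations x 100 columns of standard-normal data, column-standardized like R's
# scale() (ddof=1 matches R's sample standard deviation).
mat_py = np.random.randn(20, 100)
mat_py = (mat_py - mat_py.mean(axis=0)) / mat_py.std(axis=0, ddof=1)
mat2_py = np.random.randn(20, 100)
mat2_py = (mat2_py - mat2_py.mean(axis=0)) / mat2_py.std(axis=0, ddof=1)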
jforbess/pvlib-python
docs/tutorials/tmy_and_diffuse_irrad_models.ipynb
bsd-3-clause
# built-in python modules import os import inspect # scientific python add-ons import numpy as np import pandas as pd # plotting stuff # first line makes the plots appear in the notebook %matplotlib inline import matplotlib.pyplot as plt # seaborn makes your plots look better try: import seaborn as sns sns.set(rc={"figure.figsize": (12, 6)}) except ImportError: print('We suggest you install seaborn using conda or pip and rerun this cell') # finally, we import the pvlib library import pvlib # Find the absolute file path to your pvlib installation pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(pvlib))) # absolute path to a data file datapath = os.path.join(pvlib_abspath, 'data', '703165TY.csv') # read tmy data with year values coerced to a single year tmy_data, meta = pvlib.tmy.readtmy3(datapath, coerce_year=2015) tmy_data.index.name = 'Time' # TMY data seems to be given as hourly data with time stamp at the end # shift the index 30 Minutes back for calculation of sun positions tmy_data = tmy_data.shift(freq='-30Min') tmy_data.GHI.plot() plt.ylabel('Irradiance (W/m**2)') tmy_data.DHI.plot() plt.ylabel('Irradiance (W/m**2)') surface_tilt = 30 surface_azimuth = 180 # pvlib uses 0=North, 90=East, 180=South, 270=West convention albedo = 0.2 # create pvlib Location object based on meta data sand_point = pvlib.location.Location(meta['latitude'], meta['longitude'], tz='US/Alaska', altitude=meta['altitude'], name=meta['Name'].replace('"','')) print(sand_point) solpos = pvlib.solarposition.get_solarposition(tmy_data.index, sand_point) solpos.plot() # the extraradiation function returns a simple numpy array # instead of a nice pandas series. We will change this # in a future version dni_extra = pvlib.irradiance.extraradiation(tmy_data.index) dni_extra = pd.Series(dni_extra, index=tmy_data.index) dni_extra.plot() plt.ylabel('Extra terrestrial radiation (W/m**2)') airmass = pvlib.atmosphere.relativeairmass(solpos['apparent_zenith']) airmass.plot() plt.ylabel('Airmass') """ Explanation: TMY data and diffuse irradiance models This tutorial explores using TMY data as inputs to different plane of array diffuse irradiance models. This tutorial has been tested against the following package versions: * pvlib 0.2.0 * Python 2.7.10 * IPython 3.2 * pandas 0.16.2 It should work with other Python and Pandas versions. It requires pvlib > 0.2.0 and IPython > 3.0. Authors: * Rob Andrews (@Calama-Consulting), Heliolytics, June 2014 * Will Holmgren (@wholmgren), University of Arizona, July 2015 Setup See the tmy_to_power tutorial for more detailed explanations for the initial setup End of explanation """ diffuse_irrad = pd.DataFrame(index=tmy_data.index) models = ['Perez', 'Hay-Davies', 'Isotropic', 'King', 'Klucher', 'Reindl'] """ Explanation: Diffuse irradiance models Make an empty pandas DataFrame for the results. 
End of explanation """ diffuse_irrad['Perez'] = pvlib.irradiance.perez(surface_tilt, surface_azimuth, dhi=tmy_data.DHI, dni=tmy_data.DNI, dni_extra=dni_extra, solar_zenith=solpos.apparent_zenith, solar_azimuth=solpos.azimuth, airmass=airmass) """ Explanation: Perez End of explanation """ diffuse_irrad['Hay-Davies'] = pvlib.irradiance.haydavies(surface_tilt, surface_azimuth, dhi=tmy_data.DHI, dni=tmy_data.DNI, dni_extra=dni_extra, solar_zenith=solpos.apparent_zenith, solar_azimuth=solpos.azimuth) """ Explanation: HayDavies End of explanation """ diffuse_irrad['Isotropic'] = pvlib.irradiance.isotropic(surface_tilt, dhi=tmy_data.DHI) """ Explanation: Isotropic End of explanation """ diffuse_irrad['King'] = pvlib.irradiance.king(surface_tilt, dhi=tmy_data.DHI, ghi=tmy_data.GHI, solar_zenith=solpos.apparent_zenith) """ Explanation: King Diffuse model End of explanation """ diffuse_irrad['Klucher'] = pvlib.irradiance.klucher(surface_tilt, surface_azimuth, dhi=tmy_data.DHI, ghi=tmy_data.GHI, solar_zenith=solpos.apparent_zenith, solar_azimuth=solpos.azimuth) """ Explanation: Klucher Model End of explanation """ diffuse_irrad['Reindl'] = pvlib.irradiance.reindl(surface_tilt, surface_azimuth, dhi=tmy_data.DHI, dni=tmy_data.DNI, ghi=tmy_data.GHI, dni_extra=dni_extra, solar_zenith=solpos.apparent_zenith, solar_azimuth=solpos.azimuth) """ Explanation: Reindl End of explanation """ yearly = diffuse_irrad.resample('A', how='sum').dropna().squeeze() / 1000.0 # kWh monthly = diffuse_irrad.resample('M', how='sum', kind='period') / 1000.0 daily = diffuse_irrad.resample('D', how='sum') / 1000.0 """ Explanation: Calculate yearly, monthly, daily sums. End of explanation """ ax = diffuse_irrad.plot(title='In-plane diffuse irradiance', alpha=.75, lw=1) ax.set_ylim(0, 800) ylabel = ax.set_ylabel('Diffuse Irradiance [W]') plt.legend() diffuse_irrad.describe() diffuse_irrad.dropna().plot(kind='density') """ Explanation: Plot Results End of explanation """ ax_daily = daily.tz_convert('UTC').plot(title='Daily diffuse irradiation') ylabel = ax_daily.set_ylabel('Irradiation [kWh]') """ Explanation: Daily End of explanation """ ax_monthly = monthly.plot(title='Monthly average diffuse irradiation', kind='bar') ylabel = ax_monthly.set_ylabel('Irradiation [kWh]') """ Explanation: Monthly End of explanation """ yearly.plot(kind='barh') """ Explanation: Yearly End of explanation """ mean_yearly = yearly.mean() yearly_mean_deviation = (yearly - mean_yearly) / yearly * 100.0 yearly_mean_deviation.plot(kind='bar') """ Explanation: Compute the mean deviation from measured for each model and display as a function of the model End of explanation """
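# Quick numeric companion to the bar chart above (a sketch): which diffuse model deviates
# most from the mean yearly total in each direction.
print('Largest positive deviation: {}'.format(yearly_mean_deviation.idxmax()))
print('Largest negative deviation: {}'.format(yearly_mean_deviation.idxmin()))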
YeEmrick/learning
cs231/assignment/assignment2/.ipynb_checkpoints/BatchNormalization-checkpoint.ipynb
apache-2.0
# As usual, a bit of setup import time import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.fc_net import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.iteritems(): print '%s: ' % k, v.shape """ Explanation: Batch Normalization One way to make deep networks easier to train is to use more sophisticated optimization procedures such as SGD+momentum, RMSProp, or Adam. Another strategy is to change the architecture of the network to make it easier to train. One idea along these lines is batch normalization which was recently proposed by [3]. The idea is relatively straightforward. Machine learning methods tend to work better when their input data consists of uncorrelated features with zero mean and unit variance. When training a neural network, we can preprocess the data before feeding it to the network to explicitly decorrelate its features; this will ensure that the first layer of the network sees data that follows a nice distribution. However even if we preprocess the input data, the activations at deeper layers of the network will likely no longer be decorrelated and will no longer have zero mean or unit variance since they are output from earlier layers in the network. Even worse, during the training process the distribution of features at each layer of the network will shift as the weights of each layer are updated. The authors of [3] hypothesize that the shifting distribution of features inside deep neural networks may make training deep networks more difficult. To overcome this problem, [3] proposes to insert batch normalization layers into the network. At training time, a batch normalization layer uses a minibatch of data to estimate the mean and standard deviation of each feature. These estimated means and standard deviations are then used to center and normalize the features of the minibatch. A running average of these means and standard deviations is kept during training, and at test time these running averages are used to center and normalize features. It is possible that this normalization strategy could reduce the representational power of the network, since it may sometimes be optimal for certain layers to have features that are not zero-mean or unit variance. To this end, the batch normalization layer includes learnable shift and scale parameters for each feature dimension. [3] Sergey Ioffe and Christian Szegedy, "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift", ICML 2015. 
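In code form, the train-time transform described above boils down to something like the sketch below (x has shape (N, D); gamma and beta are per-feature parameters of shape (D,); eps and momentum are small constants). This is only an illustration of the idea, not the assignment's reference implementation:
```python
mu = x.mean(axis=0)
var = x.var(axis=0)
x_hat = (x - mu) / np.sqrt(var + eps)          # zero-mean, unit-variance features
out = gamma * x_hat + beta                     # learnable scale and shift
# running statistics, used instead of the batch statistics at test time
running_mean = momentum * running_mean + (1 - momentum) * mu
running_var = momentum * running_var + (1 - momentum) * var
```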
End of explanation """ # Check the training-time forward pass by checking means and variances # of features both before and after batch normalization # Simulate the forward pass for a two-layer network N, D1, D2, D3 = 200, 50, 60, 3 X = np.random.randn(N, D1) W1 = np.random.randn(D1, D2) W2 = np.random.randn(D2, D3) a = np.maximum(0, X.dot(W1)).dot(W2) print 'Before batch normalization:' print ' means: ', a.mean(axis=0) print ' stds: ', a.std(axis=0) # Means should be close to zero and stds close to one print 'After batch normalization (gamma=1, beta=0)' a_norm, _ = batchnorm_forward(a, np.ones(D3), np.zeros(D3), {'mode': 'train'}) print ' mean: ', a_norm.mean(axis=0) print ' std: ', a_norm.std(axis=0) # Now means should be close to beta and stds close to gamma gamma = np.asarray([1.0, 2.0, 3.0]) beta = np.asarray([11.0, 12.0, 13.0]) a_norm, _ = batchnorm_forward(a, gamma, beta, {'mode': 'train'}) print 'After batch normalization (nontrivial gamma, beta)' print ' means: ', a_norm.mean(axis=0) print ' stds: ', a_norm.std(axis=0) # Check the test-time forward pass by running the training-time # forward pass many times to warm up the running averages, and then # checking the means and variances of activations after a test-time # forward pass. N, D1, D2, D3 = 200, 50, 60, 3 W1 = np.random.randn(D1, D2) W2 = np.random.randn(D2, D3) bn_param = {'mode': 'train'} gamma = np.ones(D3) beta = np.zeros(D3) for t in xrange(200): X = np.random.randn(N, D1) a = np.maximum(0, X.dot(W1)).dot(W2) batchnorm_forward(a, gamma, beta, bn_param) bn_param['mode'] = 'test' X = np.random.randn(N, D1) a = np.maximum(0, X.dot(W1)).dot(W2) a_norm, _ = batchnorm_forward(a, gamma, beta, bn_param) # Means should be close to zero and stds close to one, but will be # noisier than training-time forward passes. print 'After batch normalization (test-time):' print ' means: ', a_norm.mean(axis=0) print ' stds: ', a_norm.std(axis=0) """ Explanation: Batch normalization: Forward In the file cs231n/layers.py, implement the batch normalization forward pass in the function batchnorm_forward. Once you have done so, run the following to test your implementation. End of explanation """ # Gradient check batchnorm backward pass N, D = 4, 5 x = 5 * np.random.randn(N, D) + 12 gamma = np.random.randn(D) beta = np.random.randn(D) dout = np.random.randn(N, D) bn_param = {'mode': 'train'} fx = lambda x: batchnorm_forward(x, gamma, beta, bn_param)[0] fg = lambda a: batchnorm_forward(x, gamma, beta, bn_param)[0] fb = lambda b: batchnorm_forward(x, gamma, beta, bn_param)[0] dx_num = eval_numerical_gradient_array(fx, x, dout) da_num = eval_numerical_gradient_array(fg, gamma, dout) db_num = eval_numerical_gradient_array(fb, beta, dout) _, cache = batchnorm_forward(x, gamma, beta, bn_param) dx, dgamma, dbeta = batchnorm_backward(dout, cache) print 'dx error: ', rel_error(dx_num, dx) print 'dgamma error: ', rel_error(da_num, dgamma) print 'dbeta error: ', rel_error(db_num, dbeta) """ Explanation: Batch Normalization: backward Now implement the backward pass for batch normalization in the function batchnorm_backward. To derive the backward pass you should write out the computation graph for batch normalization and backprop through each of the intermediate nodes. Some intermediates may have multiple outgoing branches; make sure to sum gradients across these branches in the backward pass. Once you have finished, run the following to numerically check your backward pass. 
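For orientation, a staged backward pass through that graph typically looks like the sketch below, using the values cached in the forward pass (mu, var, x_hat); your variable names and grouping of the intermediate nodes may differ:
```python
N = dout.shape[0]
dbeta = dout.sum(axis=0)
dgamma = (dout * x_hat).sum(axis=0)
dx_hat = dout * gamma
dvar = np.sum(dx_hat * (x - mu) * -0.5 * (var + eps)**-1.5, axis=0)
dmu = np.sum(-dx_hat / np.sqrt(var + eps), axis=0) + dvar * np.mean(-2.0 * (x - mu), axis=0)
dx = dx_hat / np.sqrt(var + eps) + dvar * 2.0 * (x - mu) / N + dmu / N
```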
End of explanation """ N, D = 4, 5 x = 5 * np.random.randn(N, D) + 12 gamma = np.random.randn(D) beta = np.random.randn(D) dout = np.random.randn(N, D) #Forward pass def forward(x): #Step 1 N,D = x.shape mu = 1 / float(N) * np.sum(x, axis=0) #Step 2 xcenter = x - mu #Step 3 out = xcenter cache = (mu,xcenter) return out,cache #Backward pass def backward(dout,cache): mu,xcenter = cache #Backprop #Step 3 dxcenter = dout #Step2 dx = np.ones(x.shape)*dxcenter dmu = -np.sum(dxcenter,0) #Step1 dx += 1/float(N)*np.ones(x.shape)*dmu return dx # Test of the implementation with numerical gradient fx = lambda x: forward(x)[0] dx_num = eval_numerical_gradient_array(fx, x, dout) _, cache = forward(x) dx = backward(dout, cache) print 'dx error: ', rel_error(dx_num, dx) """ Explanation: Test on simple example - Remove the mean only We focus on a easier example to begin with where $$ y = x-\mu $$ instead of $$ y = \gamma \hat{x}-\beta$$ with $$\hat{x}=\frac{x-\mu}{\sqrt{\sigma^2+\epsilon}}$$ End of explanation """ def backward_alt(dout,cache): mu,xcenter = cache #Backprop #Step 3 dx = dout-1./float(N)*np.sum(dout,axis=0) return dx dx2 = backward_alt(dout,cache) rel_error(dx2,dx) """ Explanation: The direct implementation is a bit trickier. We have to find an expression for $\frac{dL}{dx_{ij}}$. First, note that: $$ y_{kl} = x_{kl}-\mu_l = x_{kl}-\frac{1}{N}\sum_p x_{pl}. $$ Then \begin{eqnarray} \frac{dL}{dx_{ij}} &=& \sum_{k,l} \frac{dL}{dy_{kl}} \frac{dy_{kl}}{dx_{ij}} \ &=& \sum_{k,l} \frac{dL}{dy_{kl}}\left( \delta_{ik}\delta{jl}-\frac{1}{N}\delta_jl\right)\ &=&\sum_{k,l} \frac{dL}{dy_{kl}}\delta_{ik}\delta{jl}-\frac{1}{N}\sum_{k,l} \delta_{jl}\frac{dL}{dy_{kl}}\ &=& \frac{dL}{dy_{ij}}-1/N\sum_{k=1}^N\frac{dL}{dy_{kj}} \end{eqnarray} which in our numerical computation way of writing it is simply ```python dx = dout-1/N*np.sum(dout,axis=0) ```` As dout stands for dL/dy and dx stands for dL/dx keeping the notation of our previous example. End of explanation """ N, D = 100, 500 x = 5 * np.random.randn(N, D) + 12 gamma = np.random.randn(D) beta = np.random.randn(D) dout = np.random.randn(N, D) bn_param = {'mode': 'train'} out, cache = batchnorm_forward(x, gamma, beta, bn_param) t1 = time.time() dx1, dgamma1, dbeta1 = batchnorm_backward(dout, cache) t2 = time.time() dx2, dgamma2, dbeta2 = batchnorm_backward_alt(dout, cache) t3 = time.time() print 'dx difference: ', rel_error(dx1, dx2) print 'dgamma difference: ', rel_error(dgamma1, dgamma2) print 'dbeta difference: ', rel_error(dbeta1, dbeta2) print 'speedup: %.2fx' % ((t2 - t1) / (t3 - t2)) """ Explanation: Batch Normalization: alternative backward In class we talked about two different implementations for the sigmoid backward pass. One strategy is to write out a computation graph composed of simple operations and backprop through all intermediate values. Another strategy is to work out the derivatives on paper. For the sigmoid function, it turns out that you can derive a very simple formula for the backward pass by simplifying gradients on paper. Surprisingly, it turns out that you can also derive a simple expression for the batch normalization backward pass if you work out derivatives on paper and simplify. After doing so, implement the simplified batch normalization backward pass in the function batchnorm_backward_alt and compare the two implementations by running the following. Your two implementations should compute nearly identical results, but the alternative implementation should be a bit faster. 
NOTE: You can still complete the rest of the assignment if you don't figure this part out, so don't worry too much if you can't get it.
Draft for the solution
This time we want to find $\frac{dL}{d\gamma}$, $\frac{dL}{d\beta}$ and $\frac{dL}{dx}$ with
$$ y = \gamma \hat{x}+\beta$$
where
$$\hat{x}=\frac{x-\mu}{\sqrt{\sigma^2+\epsilon}} = (x-\mu)(\sigma^2+\epsilon)^{-1/2}$$
Therefore, we note for the following that
$$y_{kl} = \gamma_l \hat{x}_{kl}+\beta_l$$
and
$$\hat{x}_{kl}=(x_{kl}-\mu_l)(\sigma_l^2+\epsilon)^{-1/2}$$
where
$$\mu_l = \frac{1}{N}\sum_p x_{pl}$$
and
$$\sigma_l^2 = \frac{1}{N}\sum_p \left(x_{pl}-\mu_l\right)^2$$
Let's begin with the easy ones!
\begin{eqnarray}
\frac{dL}{d\gamma_j} &=& \sum_{kl}\frac{dL}{dy_{kl}}\frac{dy_{kl}}{d\gamma_j}\\
&=& \sum_{kl}\frac{dL}{dy_{kl}}\hat{x}_{kl}\delta_{lj}\\
&=& \sum_{k}\frac{dL}{dy_{kj}}\hat{x}_{kj}
\end{eqnarray}
For $\beta$ we have
\begin{eqnarray}
\frac{dL}{d\beta_j} &=& \sum_{kl}\frac{dL}{dy_{kl}}\frac{dy_{kl}}{d\beta_j}\\
&=& \sum_{kl}\frac{dL}{dy_{kl}}\delta_{lj}\\
&=& \sum_{k}\frac{dL}{dy_{kj}}
\end{eqnarray}
OK, let's move on to the serious one.
\begin{eqnarray}
\frac{dL}{dx_{ij}} &=& \sum_{kl}\frac{dL}{dy_{kl}}\frac{dy_{kl}}{dx_{ij}}\\
&=& \sum_{kl}\frac{dL}{dy_{kl}}\frac{dy_{kl}}{d\hat{x}_{kl}}\frac{d\hat{x}_{kl}}{dx_{ij}}
\end{eqnarray}
where $\hat{x}_{kl}=(x_{kl}-\mu_l)(\sigma_l^2+\epsilon)^{-1/2}$.
First, we have:
$$ \frac{dy_{kl}}{d\hat{x}_{kl}} = \gamma_l$$
and
\begin{eqnarray}
\frac{d\hat{x}_{kl}}{dx_{ij}} = \left(\delta_{ik}\delta_{jl}-\frac{1}{N}\delta_{jl}\right)(\sigma_l^2+\epsilon)^{-1/2}-\frac{1}{2}(x_{kl}-\mu_l)\frac{d\sigma_l^2}{dx_{ij}}(\sigma_l^2+\epsilon)^{-3/2}
\end{eqnarray}
where
$$\sigma_l^2 = \frac{1}{N}\sum_p \left(x_{pl}-\mu_l\right)^2$$
and then,
\begin{eqnarray}
\frac{d\sigma_l^2}{dx_{ij}} &=& \frac{1}{N}\sum_p 2\left(\delta_{ip}\delta_{jl}-\frac{1}{N}\delta_{jl}\right)\left(x_{pl}-\mu_l\right)\\
&=& \frac{2}{N}(x_{il}-\mu_l)\delta_{jl}-\frac{2}{N^2}\sum_p\delta_{jl}\left(x_{pl}-\mu_l\right)\\
&=& \frac{2}{N}(x_{il}-\mu_l)\delta_{jl}
\end{eqnarray}
Putting everything together we thus have
\begin{eqnarray}
\frac{d\hat{x}_{kl}}{dx_{ij}} = \left(\delta_{ik}\delta_{jl}-\frac{1}{N}\delta_{jl}\right)(\sigma_l^2+\epsilon)^{-1/2}-\frac{1}{N}(x_{kl}-\mu_l)(x_{il}-\mu_l)\delta_{jl}(\sigma_l^2+\epsilon)^{-3/2}
\end{eqnarray}
and therefore
\begin{eqnarray}
\frac{dL}{dx_{ij}} &=& \sum_{kl}\frac{dL}{dy_{kl}}\frac{dy_{kl}}{d\hat{x}_{kl}}\frac{d\hat{x}_{kl}}{dx_{ij}}\\
&=& \sum_{kl}\frac{dL}{dy_{kl}}\gamma_l\left(\left(\delta_{ik}\delta_{jl}-\frac{1}{N}\delta_{jl}\right)(\sigma_l^2+\epsilon)^{-1/2}-\frac{1}{N}(x_{kl}-\mu_l)(x_{il}-\mu_l)\delta_{jl}(\sigma_l^2+\epsilon)^{-3/2}\right)\\
&=& \sum_{kl}\frac{dL}{dy_{kl}}\gamma_l\left(\delta_{ik}\delta_{jl}-\frac{1}{N}\delta_{jl}\right)(\sigma_l^2+\epsilon)^{-1/2}-\frac{1}{N}\sum_{kl}\frac{dL}{dy_{kl}}\gamma_l(x_{kl}-\mu_l)(x_{il}-\mu_l)\delta_{jl}(\sigma_l^2+\epsilon)^{-3/2}\\
&=& \frac{dL}{dy_{ij}}\gamma_j(\sigma_j^2+\epsilon)^{-1/2}-\frac{1}{N}\sum_{k}\frac{dL}{dy_{kj}}\gamma_j(\sigma_j^2+\epsilon)^{-1/2}-\frac{1}{N}\sum_{k}\frac{dL}{dy_{kj}}\gamma_j(x_{kj}-\mu_j)(x_{ij}-\mu_j)(\sigma_j^2+\epsilon)^{-3/2}\\
&=& \frac{1}{N}\gamma_j(\sigma_j^2+\epsilon)^{-1/2}\left(N\frac{dL}{dy_{ij}}-\sum_k\frac{dL}{dy_{kj}}-(x_{ij}-\mu_j)(\sigma_j^2+\epsilon)^{-1}\sum_k\frac{dL}{dy_{kj}}(x_{kj}-\mu_j)\right)
\end{eqnarray}
A vectorized sketch based on this final expression appears as an extra cell further below.
End of explanation
"""

N, D, H1, H2, C = 2, 10, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))

for reg in [0, 3.14]:
    print 'Running check with reg = ', reg
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              reg=reg, weight_scale=5e-2, dtype=np.float64,
                              use_batchnorm=True)

    loss, grads = model.loss(X, y)
    print 'Initial loss: ', loss

    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
    if reg == 0: print
"""
Explanation: Fully Connected Nets with Batch Normalization
Now that you have a working implementation for batch normalization, go back to your FullyConnectedNet in the file cs231n/classifiers/fc_net.py. Modify your implementation to add batch normalization.
Concretely, when the flag use_batchnorm is True in the constructor, you should insert a batch normalization layer before each ReLU nonlinearity. The outputs from the last layer of the network should not be normalized. Once you are done, run the following to gradient-check your implementation.
HINT: You might find it useful to define an additional helper layer similar to those in the file cs231n/layer_utils.py. If you decide to do so, do it in the file cs231n/classifiers/fc_net.py.
End of explanation
"""

# Try training a very deep net with batchnorm
hidden_dims = [100, 100, 100, 100, 100]

num_train = 1000
small_data = {
  'X_train': data['X_train'][:num_train],
  'y_train': data['y_train'][:num_train],
  'X_val': data['X_val'],
  'y_val': data['y_val'],
}

weight_scale = 2e-2
bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True)
model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False)

bn_solver = Solver(bn_model, small_data,
                   num_epochs=30, batch_size=50,
                   update_rule='adam',
                   optim_config={
                     'learning_rate': 1e-3,
                   },
                   verbose=True, print_every=200)
bn_solver.train()

solver = Solver(model, small_data,
                num_epochs=30, batch_size=50,
                update_rule='adam',
                optim_config={
                  'learning_rate': 1e-3,
                },
                verbose=True, print_every=200)
solver.train()
"""
Explanation: Batchnorm for deep networks
Run the following to train a six-layer network on a subset of 1000 training examples both with and without batch normalization.
End of explanation
"""
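# Added illustration (not part of the original assignment): a vectorized sketch of the
# simplified backward pass derived above. The cache layout unpacked here is an assumption
# (x, x_hat, gamma, mu, var, eps); adapt it to whatever your batchnorm_forward actually stores.
def batchnorm_backward_alt_sketch(dout, cache):
    x, x_hat, gamma, mu, var, eps = cache  # hypothetical cache layout
    N = dout.shape[0]
    # dgamma_j = sum_k dout_kj * xhat_kj and dbeta_j = sum_k dout_kj
    dgamma = np.sum(dout * x_hat, axis=0)
    dbeta = np.sum(dout, axis=0)
    # dx_ij = (1/N) * gamma_j * (var_j + eps)^(-1/2) *
    #         (N * dout_ij - sum_k dout_kj - xhat_ij * sum_k dout_kj * xhat_kj)
    dx = (gamma / (N * np.sqrt(var + eps))) * (
        N * dout - np.sum(dout, axis=0) - x_hat * np.sum(dout * x_hat, axis=0))
    return dx, dgamma, dbeta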
End of explanation """ # Try training a very deep net with batchnorm hidden_dims = [50, 50, 50, 50, 50, 50, 50] num_train = 1000 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } bn_solvers = {} solvers = {} weight_scales = np.logspace(-4, 0, num=20) for i, weight_scale in enumerate(weight_scales): print 'Running weight scale %d / %d' % (i + 1, len(weight_scales)) bn_model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=True) model = FullyConnectedNet(hidden_dims, weight_scale=weight_scale, use_batchnorm=False) bn_solver = Solver(bn_model, small_data, num_epochs=10, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 1e-3, }, verbose=False, print_every=200) bn_solver.train() bn_solvers[weight_scale] = bn_solver solver = Solver(model, small_data, num_epochs=10, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 1e-3, }, verbose=False, print_every=200) solver.train() solvers[weight_scale] = solver np.array(solvers[weight_scales[-2]].loss_history).mean() # Plot results of weight scale experiment best_train_accs, bn_best_train_accs = [], [] best_val_accs, bn_best_val_accs = [], [] final_train_loss, bn_final_train_loss = [], [] for ws in weight_scales: best_train_accs.append(max(solvers[ws].train_acc_history)) bn_best_train_accs.append(max(bn_solvers[ws].train_acc_history)) best_val_accs.append(max(solvers[ws].val_acc_history)) bn_best_val_accs.append(max(bn_solvers[ws].val_acc_history)) final_train_loss.append(np.mean(solvers[ws].loss_history)) bn_final_train_loss.append(np.mean(bn_solvers[ws].loss_history)) plt.subplot(3, 1, 1) plt.title('Best val accuracy vs weight initialization scale') plt.xlabel('Weight initialization scale') plt.ylabel('Best val accuracy') plt.semilogx(weight_scales, best_val_accs, '-o', label='baseline') plt.semilogx(weight_scales, bn_best_val_accs, '-o', label='batchnorm') plt.legend(ncol=2, loc='lower right') plt.subplot(3, 1, 2) plt.title('Best train accuracy vs weight initialization scale') plt.xlabel('Weight initialization scale') plt.ylabel('Best training accuracy') plt.semilogx(weight_scales, best_train_accs, '-o', label='baseline') plt.semilogx(weight_scales, bn_best_train_accs, '-o', label='batchnorm') plt.legend() plt.subplot(3, 1, 3) plt.title('Final training loss vs weight initialization scale') plt.xlabel('Weight initialization scale') plt.ylabel('Final training loss') plt.semilogx(weight_scales, final_train_loss, '-o', label='baseline') plt.semilogx(weight_scales, bn_final_train_loss, '-o', label='batchnorm') plt.legend() plt.gcf().set_size_inches(10, 15) plt.show() """ Explanation: Batch normalization and initialization We will now run a small experiment to study the interaction of batch normalization and weight initialization. The first cell will train 8-layer networks both with and without batch normalization using different scales for weight initialization. The second layer will plot training accuracy, validation set accuracy, and training loss as a function of the weight initialization scale. End of explanation """
sdpython/ensae_teaching_cs
_doc/notebooks/td2a/pandas_iterator_correction.ipynb
mit
from jyquickhelper import add_notebook_menu
add_notebook_menu()

from sklearn.datasets import load_iris
data = load_iris()

import pandas
df = pandas.DataFrame(data.data)
df.columns = "X1 X2 X3 X4".split()
df["target"] = data.target
df.head(n=2)
"""
Explanation: 2A.data - Pandas and iterators - correction
pandas tends to use a lot of memory when loading data, roughly three times the size of the file on disk. When memory is not large enough, what can we do?
End of explanation
"""

import sklearn.utils
df = sklearn.utils.shuffle(df)
df.to_csv("iris.txt", sep="\t", index=False)
"""
Explanation: We shuffle the rows because the dataframe is sorted, which would otherwise hide some random effects.
End of explanation
"""

for df in pandas.read_csv("iris.txt", sep="\t", iterator=True, chunksize=60):
    print(df.shape)
"""
Explanation: Exercise 1: iterating over a large file
The iterator parameter of the read_csv function is used to walk through a file in blocks whose size is set by the chunksize parameter. The read_csv function implements this mechanism.
End of explanation
"""

from sklearn.model_selection import train_test_split

df_full_it = pandas.read_csv('iris.txt', sep='\t', chunksize=10,
                             encoding='utf-8', engine='python')

first_exec = True
for df_full_chunk in df_full_it:
    X_train_chunk, X_test_chunk = train_test_split(df_full_chunk)
    if first_exec:
        X_train_chunk.to_csv("X_train.csv", sep="\t", index=False)
        X_test_chunk.to_csv("X_test.csv", sep="\t", index=False)
        first_exec = False
    else:
        X_train_chunk.to_csv("X_train.csv", sep="\t", index=False, mode='a', header=False)
        X_test_chunk.to_csv("X_test.csv", sep="\t", index=False, mode='a', header=False)
"""
Explanation: Exercise 2: train/test split
The proposed solution is implemented with train_test_split, applied chunk by chunk.
End of explanation
"""

X_train = pandas.read_csv("X_train.csv", sep="\t")
X_train.head(n=2)

X_train.shape

X_test = pandas.read_csv("X_test.csv", sep="\t")
X_test.head(n=2)

X_test.shape

X_train.groupby("target").count()
"""
Explanation: The class distribution is not uniform.
When the classes are well represented, this does not hurt the results. However, under-represented classes could disappear entirely from one of the two parts.
Exercise 3: stratify?
The stratify parameter is useful for a classification problem in which a class is under-represented. Such a class may well end up insufficiently represented in one of the two sets, which is why there is an option to impose a number of examples of that class in each of the two sets (train, test). The quality of the models improves, just as the quality of polls improves with stratified sampling.
If all the examples of that class happen to sit at the beginning of the large file being read, the program starts off with a wrong idea of the class distribution. The only way to handle this is to first perform a train/test split per class (indicated by the stratification variable) and then recompose the training and test sets while imposing the desired proportions.
End of explanation
"""

from sklearn.model_selection import train_test_split

strat_name = 'target'

df_full_it = pandas.read_csv('iris.txt', sep='\t', chunksize=10,
                             encoding='utf-8', dtype=object, engine='python')

strat_list = []

for df_full_chunk in df_full_it:
    for current_strat in df_full_chunk[strat_name].unique():
        if str(current_strat) in strat_list:
            selection = df_full_chunk[df_full_chunk[strat_name] == current_strat]
            selection.to_csv("strat_{}.csv".format(current_strat), sep="\t", index=False,
                             encoding='utf-8', mode='a', header=False)
        else:
            strat_list.append(str(current_strat))
            selection = df_full_chunk[df_full_chunk[strat_name] == current_strat]
            selection.to_csv("strat_{}.csv".format(current_strat), sep="\t", index=False,
                             encoding='utf-8')

first_exec = True
for current_strat in strat_list:
    df_strat_it = pandas.read_csv("strat_{}.csv".format(current_strat), sep='\t', chunksize=1000,
                                  encoding='utf-8', dtype=object, engine='python')
    for df_strat_chunk in df_strat_it:
        X_train_chunk, X_test_chunk = train_test_split(df_strat_chunk)
        if first_exec:
            X_train_chunk.to_csv("X_train_strat.csv", sep="\t", index=False, encoding='utf-8')
            X_test_chunk.to_csv("X_test_strat.csv", sep="\t", index=False, encoding='utf-8')
            first_exec = False
        else:
            X_train_chunk.to_csv("X_train_strat.csv", sep="\t", index=False, encoding='utf-8',
                                 mode='a', header=False)
            X_test_chunk.to_csv("X_test_strat.csv", sep="\t", index=False, encoding='utf-8',
                                 mode='a', header=False)

X_train = pandas.read_csv("X_train_strat.csv", sep="\t")
X_train.head(n=2)

X_train.groupby("target").count()

X_test = pandas.read_csv("X_test_strat.csv", sep="\t")
X_test.head(n=2)

X_test.groupby("target").count()
"""
Explanation: We check that the sample is indeed stratified.
End of explanation
"""
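# Added note (not part of the original exercise): when the whole dataset fits in memory,
# scikit-learn can stratify the split directly with the stratify option discussed above;
# the chunked approach is only needed for files too large to load at once.
df_small = pandas.read_csv("iris.txt", sep="\t")
X_tr, X_te = train_test_split(df_small, test_size=0.2,
                              stratify=df_small["target"], random_state=0)
X_tr.groupby("target").count()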
jasonarita/Kaggle-Titanic-R-Python
0-Basic Model-All Survive or Die.ipynb
mit
import csv as csv import numpy as np """ Explanation: The Most Basic-est Model of Them All They all survive End of explanation """ test_file = open('./data/test.csv', 'rb') # Open the test data test_file_object = csv.reader(test_file) header = test_file_object.next() header """ Explanation: The optimistic model: They All Survive End of explanation """ predictions_file = open('./models/jfaPythonModel-allSurvive.csv', 'wb') predictions_file_object = csv.writer(predictions_file) """ Explanation: Open a new model (.csv) file to write to End of explanation """ predictions_file_object.writerow(['PassengerID', 'Survived']) for row in test_file_object: predictions_file_object.writerow([row[0], "1"]) test_file.close() predictions_file.close() """ Explanation: Write the columns header row End of explanation """ output_predictions_file = open('./models/jfaPythonModel-allSurvive.csv', 'rb') output_predictions_file_object = csv.reader(output_predictions_file) data = [] for row in output_predictions_file_object: data.append(row[0:]) data = np.array(data) data.shape data """ Explanation: Take a look at the resulting predictions End of explanation """ test_data = open('./data/test.csv') test_data_object = csv.reader(test_data) """ Explanation: Kaggle Submission Results Your submission scored 0.37321 Only 37% correct. Looks like we can do better! The Overly Pessimistic Model Let's create a model which predicts that all Titanic passengers die. First open up the test data End of explanation """ header = test_data_object.next() header """ Explanation: Skip the first row because it's the header row End of explanation """ predictions_file = open('./models/jfaPythonModel-allDie.csv', 'wb') predictions_file_object = csv.writer(predictions_file) predictions_file_object.writerow(['PassengerID', 'Survived']) """ Explanation: Let's open up an output prediction model/csv-file End of explanation """ for passenger in test_data_object: predictions_file_object.writerow([passenger[0], "0"]) """ Explanation: Write in the prediction model that they all died End of explanation """ test_data.close() predictions_file.close() """ Explanation: Close the test data file and the predicitons file End of explanation """ output_predictions_file = open('./models/jfaPythonModel-allDie.csv', 'rb') output_predictions_file_object = csv.reader(output_predictions_file) data = [] for passenger in output_predictions_file_object: data.append(passenger[0:]) data = np.array(data) data.shape output_predictions_file.close() """ Explanation: Take a look at the output predictions End of explanation """
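# Added aside (not part of the original notebook): the same all-die submission can be built
# more compactly with pandas. The output file name below is an illustrative choice.
import pandas as pd

test_df = pd.read_csv('./data/test.csv')
submission = pd.DataFrame({'PassengerID': test_df.iloc[:, 0], 'Survived': 0})
submission.to_csv('./models/jfaPythonModel-allDie-pandas.csv', index=False)
submission.head()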
bspalding/research_public
lectures/drafts/Measures of Dispersion.ipynb
apache-2.0
import numpy as np import math np.random.seed(121) X = np.sort(np.random.randint(100, size=20)) print 'X:', X mu = np.mean(X) print 'Mean of X:', mu """ Explanation: Dispersion By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie Notebook released under the Creative Commons Attribution 4.0 License. Dispersion measures how spread out a set of data is. This corresponds to risk when our data set is returns over time. Data with low dispersion is heavily clustered around the mean, while high dispersion a indicates many very large and very small values. Let's generate an array of random integers to work with. End of explanation """ print 'Range of X:', np.ptp(X) """ Explanation: Range Range is simply the difference between the maximum and minimum values in a dataset. Not surprisingly, it is very sensitive to outliers. End of explanation """ abs_dispersion = [abs(mu - x) for x in X] MAD = sum(abs_dispersion)/len(abs_dispersion) print 'Mean absolute deviation of X:', MAD """ Explanation: Mean absolute deviation The mean absolute deviation is the average of the distances of observations from the arithmetic mean. We use the absolute value of the deviation, so that 5 above the mean and 5 below the mean both contribute 5, because otherwise the deviations always sum to 0. $$ MAD = \frac{\sum_{i=1}^n |X_i - \mu|}{n} $$ where $n$ is the number of observations and $\mu$ is their mean. End of explanation """ print 'Variance of X:', np.var(X) print 'Standard deviation of X:', np.std(X) """ Explanation: Variance and standard deviation The variance $\sigma^2$ is defined as the average of the squared deviations around the mean: $$ \sigma^2 = \frac{\sum_{i=1}^n (X_i - \mu)^2}{n} $$ This is sometimes more convenient than the mean absolute deviation because absolute value is not differentiable, while squaring is smooth, and some optimization algorithms rely on differentiability. Standard deviation is defined as the square root of the variance, $\sigma$, and it is the easier of the two to interpret because it is in the same units as the observations. End of explanation """ k = 1.25 dist = k*np.std(X) l = [x for x in X if abs(x - mu) <= dist] print 'Observations within', k, 'stds of mean:', l print 'Confirming that', float(len(l))/len(X), '>', 1 - 1/k**2 """ Explanation: One way to interpret standard deviation is by referring to Chebyshev's inequality. This tells us that the proportion of samples within $k$ standard deviations (that is, within a distance of $k \cdot$ standard deviation) of the mean is at least $1 - 1/k^2$ for all $k>1$. Let's check that this is true for our data set. End of explanation """ # Because there is no built-in semideviation, we'll compute it ourselves lows = [e for e in X if e <= mu] semivar = sum(map(lambda x: (x - mu)**2,lows))/len(lows) print 'Semivariance of X:', semivar print 'Semideviation of X:', math.sqrt(semivar) """ Explanation: The bound given by Chebyshev's inequality seems fairly loose in this case. This bound is rarely strict, but it is useful because it holds for all data sets and distributions. Semivariance and semideviation Although variance and standard deviation tell us how volatile a quantity is, they do not differentiate between deviations upward and deviations downward. Often, such as in the case of returns on an asset, we are more worried about deviations downward. This is addressed by semivariance and semideviation, which only count the observations that fall below the mean. 
Semivariance is defined as $$ \frac{\sum_{X_i < \mu} (X_i - \mu)^2}{n_<} $$ where $n_<$ is the number of observations which are smaller than the mean. Semideviation is the square root of the semivariance. End of explanation """ B = 19 lows_B = [e for e in X if e <= B] semivar_B = sum(map(lambda x: (x - B)**2,lows_B))/len(lows_B) print 'Target semivariance of X:', semivar_B print 'Target semideviation of X:', math.sqrt(semivar_B) """ Explanation: A related notion is target semivariance (and target semideviation), where we average the distance from a target of values which fall below that target: $$ \frac{\sum_{X_i < B} (X_i - B)^2}{n_{<B}} $$ End of explanation """
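# Added aside (not part of the original lecture): the same target semivariance and target
# semideviation computed with a NumPy boolean mask instead of list comprehensions.
# (Values may differ slightly from the cell above if integer division was used there.)
lows_B_np = X[X <= B]
target_semivar_np = np.mean((lows_B_np - B) ** 2)
target_semidev_np = np.sqrt(target_semivar_np)
target_semivar_np, target_semidev_np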
Cyb3rWard0g/ThreatHunter-Playbook
docs/notebooks/windows/05_defense_evasion/WIN-190510202010.ipynb
gpl-3.0
from openhunt.mordorutils import * spark = get_spark() """ Explanation: WDigest Downgrade Metadata | | | |:------------------|:---| | collaborators | ['@Cyb3rWard0g', '@Cyb3rPandaH'] | | creation date | 2019/05/10 | | modification date | 2020/09/20 | | playbook related | [] | Hypothesis Adversaries might have updated the property value UseLogonCredential of HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\WDigest to 1 in order to be able to extract clear text passwords from memory contents of lsass. Technical Context Windows 8.1 introduced a registry setting that allows for disabling the storage of the user’s logon credential in clear text for the WDigest provider. Offensive Tradecraft This setting can be modified in the property UseLogonCredential for the registry key HKLM:\SYSTEM\CurrentControlSet\Control\SecurityProviders\WDigest. If this key does not exists, you can create it and set it to 1 to enable clear text passwords. Mordor Test Data | | | |:----------|:----------| | metadata | https://mordordatasets.com/notebooks/small/windows/05_defense_evasion/SDWIN-190518201922.html | | link | https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/defense_evasion/host/empire_wdigest_downgrade.tar.gz | Analytics Initialize Analytics Engine End of explanation """ mordor_file = "https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/defense_evasion/host/empire_wdigest_downgrade.tar.gz" registerMordorSQLTable(spark, mordor_file, "mordorTable") """ Explanation: Download & Process Mordor Dataset End of explanation """ df = spark.sql( ''' SELECT `@timestamp`, Hostname, Image, TargetObject FROM mordorTable WHERE Channel = "Microsoft-Windows-Sysmon/Operational" AND EventID = 13 AND TargetObject LIKE "%UseLogonCredential" AND Details = 1 ''' ) df.show(10,False) """ Explanation: Analytic I Look for any process updating UseLogonCredential registry key value | Data source | Event Provider | Relationship | Event | |:------------|:---------------|--------------|-------| | Windows registry | Microsoft-Windows-Sysmon/Operational | Process modified Windows registry key value | 13 | End of explanation """
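# Added triage view (not part of the original playbook): aggregate the same Sysmon EventID 13
# events by the image that performed the modification, to see which processes set
# UseLogonCredential to 1 most often in this dataset. Only fields already used above are queried.
df_by_image = spark.sql(
    '''
    SELECT Image, COUNT(*) AS event_count
    FROM mordorTable
    WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
        AND EventID = 13
        AND TargetObject LIKE "%UseLogonCredential"
        AND Details = 1
    GROUP BY Image
    ORDER BY event_count DESC
    '''
)
df_by_image.show(10, False)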
cogitare-ai/cogitare
docs/source/quickstart.ipynb
mit
# adapted from https://github.com/pytorch/examples/blob/master/mnist/main.py from cogitare import Model from cogitare import utils from cogitare.data import DataSet, AsyncDataLoader from cogitare.plugins import EarlyStopping from cogitare.metrics.classification import accuracy import cogitare import torch.nn as nn import torch import torch.nn.functional as F from torch.nn.utils import clip_grad_norm import torch.optim as optim from sklearn.datasets import fetch_mldata import numpy as np CUDA = True cogitare.utils.set_cuda(CUDA) class CNN(Model): def __init__(self): super(CNN, self).__init__() # define the model self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, batch): # in this sample, each batch will be a tuple containing (input_batch, expected_batch) # in forward in are only interested in input so that we can ignore the second item of the tuple input, _ = batch # batch X flat tensor -> batch X 1 channel (gray) X width X heigth input = input.view(32, 1, 28, 28) # pass the data in the net x = F.relu(F.max_pool2d(self.conv1(input), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) # return the model output return F.log_softmax(x, dim=1) def loss(self, output, batch): # in this sample, each batch will be a tuple containing (input_batch, expected_batch) # in loss in are only interested in expected so that we can ignore the first item of the tuple _, expected = batch return F.nll_loss(output, expected) """ Explanation: Quickstart This is a simple tutorial to get started with Cogitare main functionalities. In this tutorial, we will write a Convolutional Neural Network (CNN) to classify handwritten digits (MNIST). Model We start by defining our CNN model. When developing a model with Cogitare, your model must extend the cogitare.Model class. This class provides the Model interface, which allows you to train and evaluate the model efficiently. To implement a model, you must extend the cogitare.Model class and implement the forward() and loss() methods. The forward method will receive the batch. In this way, it is necessary to implement the forward pass through the network in this method, and then return the output of the net. The loss method will receive the output of the forward() and the batch received from the iterator, apply a loss function, compute and return it. The Model interface will iterate over the dataset, and execute each batch on forward, loss, and backward. End of explanation """ mnist = fetch_mldata('MNIST original') mnist.data = (mnist.data / 255).astype(np.float32) """ Explanation: The model class is simple; it only requires de forward and loss methods. By default, Cogitare will backward the loss returned by the loss() method, and optimize the model parameters. If you want to disable the Cogitare backward and optimization steps, just return None in the loss function. If you return None, you are responsible by backwarding and optimizing the parameters. Data Loading In this step, we will load the data from sklearn package. End of explanation """ # as input, the DataSet is expected a list of iterators. 
In our case, the first iterator is the input # data and the second iterator is the target data # also, we set the batch size to 32 and enable the shuffling # drop the last batch if its size is different of 32 data = DataSet([mnist.data, mnist.target.astype(int)], batch_size=32, shuffle=True, drop_last=True) # then, we split our dataset into a train and into a validation sets, by a ratio of 0.8 data_train, data_validation = data.split(0.8) """ Explanation: Cogitare provides a toolbox to load and pre-process data for your models. In this introduction, we will use the DataSet and the AsyncDataLoader as examples. The DataSet is responsible by iterating over multiples data iterators (in our case, we'll have two data iterators: input samples, expected samples). End of explanation """ def pre_process(batch): input, expected = batch # the data is a numpy.ndarray (loaded from sklearn), so we need to convert it to Variable input = utils.to_variable(input, dtype=torch.FloatTensor) # converts to a torch Variable of LongTensor expected = utils.to_variable(expected, dtype=torch.LongTensor) # converts to a torch Variable of LongTensor return input, expected # we wrap our data_train and data_validation iterators over the async data loader. # each loader will load 16 batches ahead of the model execution using 8 workers (8 threads, in this case). # for each batch, it will be pre-processed in parallel with the preprocess function, that will load the data # on GPU data_train = AsyncDataLoader(data_train, buffer_size=16, mode='threaded', workers=8, on_batch_loaded=pre_process) data_validation = AsyncDataLoader(data_validation, buffer_size=16, mode='threaded', workers=8, on_batch_loaded=pre_process) """ Explanation: Notice that Cogitare accepts any iterator as input. Instead of using our DataSet, you can use the mnist.data itself, PyTorch's data loaders, or any other input that acts as an iterator. In some cases, we can increase the model performance by loading the data using multiples threads/processes or by pre-loading the data before being requested by the model. With the AsyncDataLoader, we can load N batches ahead of the model execution in parallel. We present this technique in this sample because it can increase performance in a wide range of models (when the data loading or pre-processing is slower than the model execution). End of explanation """ data_train.cache() data_validation.cache() """ Explanation: to cache the async buffer before training, we can: End of explanation """ next(data_train) """ Explanation: Let's look how the data looks like: End of explanation """ model = CNN() model.register_default_plugins() """ Explanation: Training Now, we can train our model. First, lets create the model instance and add the default plugins to watch the training status. The default plugin includes: Progress bar per batch and epoch Plot training and validation losses (if validation_dataset is present) Log training loss End of explanation """ early = EarlyStopping(max_tries=10, path='/tmp/model.pt') # after 10 epochs without decreasing the loss, stop the training and the best model is saved at /tmp/model.pt # the plugin will execute in the end of each epoch model.register_plugin(early, 'on_end_epoch') """ Explanation: Besides that, we may want to add some extra plugins, such as the EarlyStopping. So, if the model is not decreasing the loss after N epochs, the training stops and the best model is used. 
To add the early stopping algorithm, you can use: End of explanation """ model.register_plugin(lambda *args, **kw: clip_grad_norm(model.parameters(), 1.0), 'before_step') # will execute the clip_grad_norm before each optimization step """ Explanation: Also, a common technique is to clip the gradient during training. If you want to clip the grad, you can use: End of explanation """ optimizer = optim.Adam(model.parameters(), lr=0.001) if CUDA: model = model.cuda() model.learn(data_train, optimizer, data_validation, max_epochs=100) """ Explanation: Now, we define the optimizator, and then start the model training: End of explanation """ def model_accuracy(output, data): _, indices = torch.max(output, 1) return accuracy(indices, data[1]) # evaluate the model loss and accuracy over the validation dataset metrics = model.evaluate_with_metrics(data_validation, {'loss': model.metric_loss, 'accuracy': model_accuracy}) # the metrics is an dict mapping the metric name (loss or accuracy, in this sample) to a list of the accuracy output # we have a measurement per batch. So, to have a value of the full dataset, we take the mean value: metrics_mean = {'loss': 0, 'accuracy': 0} for loss, acc in zip(metrics['loss'], metrics['accuracy']): metrics_mean['loss'] += loss metrics_mean['accuracy'] += acc.item() qtd = len(metrics['loss']) print('Loss: {}'.format(metrics_mean['loss'] / qtd)) print('Accuracy: {}'.format(metrics_mean['accuracy'] / qtd)) """ Explanation: To check the model loss and accuracy on the validation dataset: End of explanation """
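# Added aside (not part of the original quickstart): the averaging loop above can be written
# more compactly; metrics['accuracy'] holds per-batch tensors, hence the .item() calls.
mean_loss = sum(metrics['loss']) / len(metrics['loss'])
mean_acc = np.mean([acc.item() for acc in metrics['accuracy']])
print('Loss: {}'.format(mean_loss))
print('Accuracy: {}'.format(mean_acc))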
landlab/landlab
notebooks/tutorials/overland_flow/kinwave_implicit/kinwave_implicit_overland_flow.ipynb
mit
import copy import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from landlab import RasterModelGrid, imshow_grid from landlab.components.overland_flow import KinwaveImplicitOverlandFlow from landlab.io.esri_ascii import read_esri_ascii print(KinwaveImplicitOverlandFlow.__doc__) """ Explanation: <a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a> The Implicit Kinematic Wave Overland Flow Component <hr> <small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small> <hr> Overview This notebook demonstrates the KinwaveImplicitOverlandFlow Landlab component. The component implements a two-dimensional kinematic wave model of overland flow, using a digital elevation model or other source of topography as the surface over which water flows. Theory The kinematic wave equations are a simplified form of the 2D shallow-water equations in which energy slope is assumed to equal bed slope. Conservation of water mass is expressed in terms of the time derivative of the local water depth, $H$, and the spatial derivative (divergence) of the unit discharge vector $\mathbf{q} = UH$ (where $U$ is the 2D depth-averaged velocity vector): $$\frac{\partial H}{\partial t} = R - \nabla\cdot \mathbf{q}$$ where $R$ is the local runoff rate [L/T] and $\mathbf{q}$ has dimensions of volume flow per time per width [L$^2$/T]. The discharge depends on the local depth, bed-surface gradient $\mathbf{S}=-\nabla\eta$ (this is the kinematic wave approximation; $\eta$ is land surface height), and a roughness factor $C_r$: $$\mathbf{q} = \frac{1}{C_r} \mathbf{S} H^\alpha |S|^{-1/2}$$ Reads may recognize this as a form of the Manning, Chezy, or Darcy-Weisbach equation. If $\alpha = 5/3$ then we have the Manning equation, and $C_r = n$ is "Manning's n". If $\alpha = 3/2$ then we have the Chezy/Darcy-Weisbach equation, and $C_r = 1/C = (f/8g)^{1/2}$ represents the Chezy roughness factor $C$ and the equivalent Darcy-Weisbach factor $f$. Numerical solution The solution method used by this component is locally implicit, and works as follows. At each time step, we iterate from upstream to downstream over the topography. Because we are working downstream, we can assume that we know the total water inflow to a given cell. We solve the following mass conservation equation at each cell: $$\frac{H^{t+1} - H^t}{\Delta t }= \frac{Q_{in}}{A} - \frac{Q_{out}}{A} + R$$ where $H$ is water depth at a given grid node, $t$ indicates time step number, $\Delta t$ is time step duration, $Q_{in}$ is total inflow discharge, $Q_{out}$ is total outflow discharge, $A$ is cell area, and $R$ is local runoff rate (precipitation minus infiltration; could be negative if runon infiltration is occurring). The specific outflow discharge leaving a cell along one of its faces is: $$q = (1/C_r) H^\alpha S^{1/2}$$ where $S$ is the downhill-positive gradient of the link that crosses this particular face. Outflow discharge is zero for links that are flat or "uphill" from the given node. Total discharge out of a cell is then the sum of (specific discharge x face width) over all outflow faces: $$Q_{out} = \sum_{i=1}^N (1/C_r) H^\alpha S_i^{1/2} W_i$$ where $N$ is the number of outflow faces (i.e., faces where the ground slopes downhill away from the cell's node), and $W_i$ is the width of face $i$. 
We use the depth at the cell's node, so this simplifies to: $$Q_{out} = (1/C_r) H'^\alpha \sum_{i=1}^N S_i^{1/2} W_i$$ Notice that we know everything here except $H'$. The reason we know $Q_{out}$ is that it equals $Q_{in}$ (which is either zero or we calculated it previously) plus $RA$. We define $H$ in the above as a weighted sum of the "old" (time step $t$) and "new" (time step $t+1$) depth values: $$H' = w H^{t+1} + (1-w) H^t$$ If $w=1$, the method is fully implicit. If $w=0$, it is a simple forward explicit method. When we combine these equations, we have an equation that includes the unknown $H^{t+1}$ and a bunch of terms that are known. If $w\ne 0$, it is a nonlinear equation in $H^{t+1}$, and must be solved iteratively. We do this using a root-finding method in the scipy.optimize library. In order to implement the algorithm, we must already know which of neighbors of each node are lower than the neighbor, and what the slopes between them are. We accomplish this using the FlowAccumulator and FlowDirectorMFD components. Running the FlowAccumulator also generates a sorted list (array) of nodes in drainage order. The component Import the needed libraries, then inspect the component's docstring: End of explanation """ print(KinwaveImplicitOverlandFlow.__init__.__doc__) """ Explanation: The docstring for the __init__ method will give us a list of parameters: End of explanation """ # Process parameters n = 0.01 # roughness coefficient, (s/m^(1/3)) dep_exp = 5.0 / 3.0 # depth exponent S = 0.01 # slope of plane R = 72.0 # runoff rate, mm/hr # Run-control parameters run_time = 240.0 # duration of run, (s) nrows = 5 # number of node rows ncols = 11 # number of node columns dx = 2.0 # node spacing, m dt = 10.0 # time-step size, s plot_every = 60.0 # plot interval, s # Derived parameters num_steps = int(run_time / dt) """ Explanation: Example 1: downpour on a plane The first example tests that the component can reproduce the expected steady flow pattern on a sloping plane, with a gradient of $S_p$. We'll adopt the Manning equation. Once the system comes into equilibrium, the discharge should increase width distance down the plane according to $q = Rx$. We can use this fact to solve for the corresponding water depth: $$(1/n) H^{5/3} S^{1/2} = R x$$ which implies $$H = \left( \frac{nRx}{S^{1/2}} \right)^{3/5}$$ This is the analytical solution against which to test the model. Pick the initial and run conditions End of explanation """ # create and set up grid grid = RasterModelGrid((nrows, ncols), xy_spacing=dx) grid.set_closed_boundaries_at_grid_edges(False, True, True, True) # open only on east # add required field elev = grid.add_zeros("topographic__elevation", at="node") # set topography elev[grid.core_nodes] = S * (np.amax(grid.x_of_node) - grid.x_of_node[grid.core_nodes]) """ Explanation: Create grid and fields: End of explanation """ imshow_grid(grid, elev) """ Explanation: Plot topography, first in plan view... 
End of explanation """ plt.plot(grid.x_of_node, elev) plt.xlabel("Distance (m)") plt.ylabel("Height (m)") plt.grid(True) # Instantiate the component olflow = KinwaveImplicitOverlandFlow( grid, runoff_rate=R, roughness=n, depth_exp=dep_exp ) # Helpful function to plot the profile def plot_flow_profile(grid, olflow): """Plot the middle row of topography and water surface for the overland flow model olflow.""" nc = grid.number_of_node_columns nr = grid.number_of_node_rows startnode = nc * (nr // 2) + 1 midrow = np.arange(startnode, startnode + nc - 1, dtype=int) topo = grid.at_node["topographic__elevation"] plt.plot( grid.x_of_node[midrow], topo[midrow] + grid.at_node["surface_water__depth"][midrow], "b", ) plt.plot(grid.x_of_node[midrow], topo[midrow], "k") plt.xlabel("Distance (m)") plt.ylabel("Ground and water surface height (m)") """ Explanation: ...then as a cross-section: End of explanation """ next_plot = plot_every for i in range(num_steps): olflow.run_one_step(dt) if (i + 1) * dt >= next_plot: plot_flow_profile(grid, olflow) next_plot += plot_every # Compare with analytical solution for depth Rms = R / 3.6e6 # convert to m/s nc = grid.number_of_node_columns x = grid.x_of_node[grid.core_nodes][: nc - 2] Hpred = (n * Rms * x / (S ** 0.5)) ** 0.6 plt.plot(x, Hpred, "r", label="Analytical") plt.plot( x, grid.at_node["surface_water__depth"][grid.core_nodes][: nc - 2], "b--", label="Numerical", ) plt.xlabel("Distance (m)") plt.ylabel("Water depth (m)") plt.grid(True) plt.legend() """ Explanation: Run the component forward in time, plotting the output in the form of a profile: End of explanation """ # Process parameters n = 0.1 # roughness coefficient, (s/m^(1/3)) dep_exp = 5.0 / 3.0 # depth exponent R = 72.0 # runoff rate, mm/hr # Run-control parameters rain_duration = 240.0 # duration of rainfall, s run_time = 480.0 # duration of run, s dt = 10.0 # time-step size, s dem_filename = "../hugo_site_filled.asc" # Derived parameters num_steps = int(run_time / dt) # set up arrays to hold discharge and time time_since_storm_start = np.arange(0.0, dt * (2 * num_steps + 1), dt) discharge = np.zeros(2 * num_steps + 1) # Read the DEM file as a grid with a 'topographic__elevation' field (grid, elev) = read_esri_ascii(dem_filename, name="topographic__elevation") # Configure the boundaries: valid right-edge nodes will be open; # all NODATA (= -9999) nodes will be closed. grid.status_at_node[grid.nodes_at_right_edge] = grid.BC_NODE_IS_FIXED_VALUE grid.status_at_node[np.isclose(elev, -9999.0)] = grid.BC_NODE_IS_CLOSED # display the topography cmap = copy.copy(mpl.cm.get_cmap("pink")) imshow_grid(grid, elev, colorbar_label="Elevation (m)", cmap=cmap) """ Explanation: Example 2: overland flow on a DEM For this example, we'll import a small digital elevation model (DEM) for a site in New Mexico, USA. 
End of explanation """ indices = np.where(elev[grid.nodes_at_right_edge] > 0.0)[0] outlet_nodes = grid.nodes_at_right_edge[indices] print("Outlet nodes:") print(outlet_nodes) print("Elevations of the outlet nodes:") print(elev[outlet_nodes]) # Instantiate the component olflow = KinwaveImplicitOverlandFlow( grid, runoff_rate=R, roughness=n, depth_exp=dep_exp ) discharge_field = grid.at_node["surface_water_inflow__discharge"] for i in range(num_steps): olflow.run_one_step(dt) discharge[i + 1] = np.sum(discharge_field[outlet_nodes]) plt.plot(time_since_storm_start[:num_steps], discharge[:num_steps]) plt.xlabel("Time (s)") plt.ylabel("Discharge (cms)") plt.grid(True) cmap = copy.copy(mpl.cm.get_cmap("Blues")) imshow_grid( grid, grid.at_node["surface_water__depth"], cmap=cmap, colorbar_label="Water depth (m)", ) """ Explanation: It would be nice to track discharge at the watershed outlet, but how do we find the outlet location? We actually have several valid nodes along the right-hand edge. Then we'll keep track of the field surface_water_inflow__discharge at these nodes. We can identify the nodes by the fact that they are (a) at the right-hand edge of the grid, and (b) have positive elevations (the ones with -9999 are outside of the watershed). End of explanation """ olflow.runoff_rate = 1.0 # just 1 mm/hr for i in range(num_steps, 2 * num_steps): olflow.run_one_step(dt) discharge[i + 1] = np.sum(discharge_field[outlet_nodes]) plt.plot(time_since_storm_start, discharge) plt.xlabel("Time (s)") plt.ylabel("Discharge (cms)") plt.grid(True) cmap = copy.copy(mpl.cm.get_cmap("Blues")) imshow_grid( grid, grid.at_node["surface_water__depth"], cmap=cmap, colorbar_label="Water depth (m)", ) """ Explanation: Now turn down the rain and run it a bit longer... End of explanation """
OpenSourceBrain/IzhikevichModel
numba/faster_izhikevich_model.ipynb
bsd-3-clause
import matplotlib.pyplot as plt import collections import quantities as pq import izhikevich as izhi import numpy as np %matplotlib inline from utils import reduced_cells, transform_input, plot_model DELAY = 0*pq.ms DURATION = 250 *pq.ms """ Explanation: This is a reproduction of the MATLAB script 2007.m The code is implemented in pure python. Performance of the model is atypically good as the model utilizes numba Just In Time compilation. End of explanation """ IinRange = [60,70,85,100]; params = {} params['amplitude'] = 500*pq.pA params['delay'] = DELAY params['duration'] = 600*pq.ms fig_title = 'Layer 5 regular spiking (RS) pyramidal cell (fig 8.12)' plot_model(IinRange,reduced_cells,params,cell_key='RS',title=fig_title,timed=True) IinRange = [290,370,500,550]; params = {} params['delay'] = DELAY params['duration'] = 600*pq.ms fig_title = 'Layer 5 intrinsic bursting (IB) pyramidal cell (fig 8.19)' plot_model(IinRange,reduced_cells,params,cell_key='IB',title=fig_title) IinRange = [200,300,400,600]; params = {} params['delay'] = DELAY params['duration'] = 210*pq.ms figtitle='Cortical chattering (CH) cell (fig 8.23)' plot_model(IinRange,reduced_cells,params,cell_key='CH',title=figtitle) IinRange = [100,125,200,300]; params = {} params['delay'] = DELAY T=320; figtitle = 'Low-threshold spiking (LTS) interneuron (fig 8.25)'; params['duration'] = T*pq.ms plot_model(IinRange,reduced_cells,params,cell_key='LTS',title=figtitle) T=100; params['duration'] = T*pq.ms IinRange = [73.2,100,200,400]; figtitle = 'Fast-spiking (FS) interneuron (fig 8.27) '; plot_model(IinRange,reduced_cells,params,cell_key='FS',title=figtitle) """ Explanation: The JIT model is compiled. The first model evaluation compiles the model to C code implicitly, from that point onwards evaluation speeds are fractions of ms. This is fast for python as you can see below. End of explanation """ figtitle = 'Thalamocortical (TC) cell (fig 8.31) '; Iin0 = -1200; #% required to lower Vrmp to -80mV for 120 ms IinRange = [0,50,100]; T=650; params['duration'] = T*pq.ms IinRange = transform_input(T,IinRange,Iin0,burstMode=True) plot_model(IinRange,reduced_cells,params,cell_key='TC',title=figtitle,direct=True) Iin0 = -350; IinRange = [30,50,90]; IinRange = transform_input(T,IinRange,Iin0,burstMode=True) T=650; figtitle = 'Reticular thalamic nucleus (RTN) cell (fig 8.32)'; plot_model(IinRange,reduced_cells,params,cell_key='RTN',title=figtitle,direct=True) """ Explanation: Bursting End of explanation """
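# Added illustration (not part of the original notebook): a stand-alone, numba-jitted sketch of
# the 2007-book form of the Izhikevich model,
#   C dv/dt = k (v - vr)(v - vt) - u + I,   du/dt = a (b (v - vr) - u),
#   with the reset v <- c, u <- u + d whenever v >= vpeak.
# The regular-spiking-like parameter values below are illustrative assumptions and are not taken
# from the izhikevich module imported above.
from numba import njit


@njit
def izhikevich_2007_sketch(I, dt=0.1, C=100.0, k=0.7, vr=-60.0, vt=-40.0,
                           vpeak=35.0, a=0.03, b=-2.0, c=-50.0, d=100.0):
    n = I.shape[0]
    v = np.empty(n)
    u = np.empty(n)
    v[0] = vr
    u[0] = 0.0
    for i in range(1, n):
        v[i] = v[i - 1] + dt * (k * (v[i - 1] - vr) * (v[i - 1] - vt) - u[i - 1] + I[i - 1]) / C
        u[i] = u[i - 1] + dt * a * (b * (v[i - 1] - vr) - u[i - 1])
        if v[i] >= vpeak:
            v[i - 1] = vpeak  # draw the spike peak on the previous sample
            v[i] = c
            u[i] = u[i] + d
    return v


I_step = np.zeros(5000)
I_step[1000:] = 100.0  # current step after 100 ms (dt = 0.1 ms)
v_trace = izhikevich_2007_sketch(I_step)
plt.plot(np.arange(I_step.size) * 0.1, v_trace)
plt.xlabel('t (ms)')
plt.ylabel('v (mV)')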
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session07/Day2/Clustering-Astronomical-Sources.ipynb
mit
%matplotlib inline import numpy as np import matplotlib.pyplot as plt import glob import os from time import time from matplotlib.pyplot import imshow from matplotlib.image import imread from sklearn.cluster import KMeans from sklearn.preprocessing import MinMaxScaler, StandardScaler from sklearn import metrics from sklearn.metrics.pairwise import euclidean_distances """ Explanation: Clustering Astronomical Sources The objective of this hands-on activity is to cluster a set of candidate sources from the Zwicky Transient Facility's (ZTF) image subtraction pipeline. All candidate features and postage stamps were extracted from ZTF's public alert stream. The goal of this exercise is to become familiar with the ZTF data, the examination of some of its features, and running sklearn's KMeans algorithm on 2 or more features. Here are the steps we will take: Load data Plot Features 'elong' and 'chipsf' Run KMeans on 2 Features Feature Scaling Evaluation Results Quantitatively Evaluate Results by Examining Postage Stamps Clustering in a Dimensionally-Reduced Space 0a. Imports These are all the imports that will be used in this notebook. All should be available in the DSFP conda environment. End of explanation """ F_META = # complete F_FEATS = # complete D_STAMPS = # complete """ Explanation: 0b. Data Location You will need the following files: - dsfp_ztf_meta.npy - dsfp_ztf_feats.npy - dsfp_ztf_png_stamps.tar.gz You will need to unzip and unpack this last file (a "tarball") called dsfp_ztf_png_stamps.tar.gz. Run the following commands in the same directory as this notebook to unpack everything (note - some operating systems automatically unzip downloaded files): gunzip dsfp_ztf_png_stamps.tar.gz tar -xvf dsfp_ztf_png_stamps.tar You should now have a directory in your current working directory (cwd) called dsfp_ztf_png_stamps. Please specify the following file locations: End of explanation """ meta = np.load(F_META) feats = np.load(F_FEATS) COL_NAMES = ['diffmaglim', 'magpsf', 'sigmapsf', 'chipsf', 'magap', 'sigmagap', 'distnr', 'magnr', 'sigmagnr', 'chinr', 'sharpnr', 'sky', 'magdiff', 'fwhm', 'classtar', 'mindtoedge', 'magfromlim', 'seeratio', 'aimage', 'bimage', 'aimagerat', 'bimagerat', 'elong', 'nneg', 'nbad', 'ssdistnr', 'ssmagnr', 'sumrat', 'magapbig', 'sigmagapbig', 'ndethist', 'ncovhist', 'jdstarthist', 'jdendhist', 'scorr', 'label'] # INSTRUCTION: Verify that feats has the same number of columns as COL_NAMES # """ Explanation: 1. Load Data We are ready to get started! :) Start by loading the data and confirming that feats has the same number of columns as COL_NAMES. Please note that the last columns is a class label with values {0, 1}, where 0=bogus, and 1=real. Today we are doing unsupervised learning, but some clustering evaluation methods use labels to quantitatively measure the quality of the clustering result. 
End of explanation """ featnames_to_select = ['chipsf', 'elong'] # Extract the Correct Features # featidxs_to_select_indices = [ COL_NAMES.index(x) for x in featnames_to_select] feats_selected = feats[:,featidxs_to_select_indices] # Scatter Plot the Two Features # def plot_scatter(dat, xlabel, ylabel, xscale='linear', yscale='linear'): plt.plot(dat[:,0], dat[:,1], 'k.') plt.xlabel(xlabel) plt.ylabel(ylabel) plt.xscale(xscale) plt.yscale(yscale) plt.show() # Scatter Plot the Two Features # def plot_histogram(dat, bins, title, xscale='linear', yscale='linear'): plt.hist(dat, bins) plt.xscale(xscale) plt.yscale(yscale) plt.title(title) plt.show() # INSTRUCTION: Scatter Plot the Data # # INSTRUCTION: Plot the Histograms for both features. Hint, it may be helpful to plot some features on a log scale. # """ Explanation: 2. Plot Features We will perform K-means clustering using two features: 'chipsf' and 'elong'. Chipsf is the uncertainty associated with performing PSF-fit photometry. The higher the chi values, the more uncertainty associated with the source's PSF fit. Elong is a measure of how elongated the source is. A transient point source should have a spherical point spread function. An elongated point source may be a sign of a problem with image subtraction. Extract features chipsf and along from the data. Scatter plot them together, and also plot their histograms. Question: What do you notice about these features? End of explanation """ def runKMeans(dat, n_clusters=2, seed=0): return KMeans(n_clusters, random_state=seed).fit(dat) def plotKMeans(kmeans_res, reduced_dat, xlabel, ylabel, xscale='linear', yscale='linear'): # Plot the decision boundary. For that, we will assign a color to each h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = reduced_dat[:, 0].min() - 1, reduced_dat[:, 0].max() + 1 y_min, y_max = reduced_dat[:, 1].min() - 1, reduced_dat[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Obtain labels for each point in mesh. Use last trained model. Z = kmeans_res.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower') plt.plot(reduced_dat[:,0], reduced_dat[:,1], 'k.') plt.scatter(kmeans_res.cluster_centers_[:, 0], kmeans_res.cluster_centers_[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.xscale(xscale) plt.yscale(yscale) plt.show() # INSTRUCTION: Use the runKMeans and plotKMeans functions to cluster the data (feats_selected) # with several values of k. """ Explanation: 3. KMeans Using Two Features We rarely ever cluster only two features from a dataset. However, the advantage of doing so is that we can readily visualize two-dimensional data. Let's start off by clustering features elong and chipsf with KMeans. The plotKMeans function below implements a visualization of KMean's partitioning that was used in sklearn's KMean's demo. Question: What do you think about the quality of the clusterings produced? End of explanation """ # INSTRUCTION: Re-scale your data using either the MinMaxScaler or StandardScaler from sklearn # # INSTRUCTION: Scatter plot your rescaled data # # INSTRUCTION: Retry KMeans with the same values of k used above. # """ Explanation: 4. 
Feature Scaling We just discovered that distance metrics can be sensitive to the scale of your data (e.g., some features span large numeric ranges, but others don't). For machine learning methods that calculate similiarty between feature vectors, it is important to normalize data within a standard range such as (0, 1) or with z-score normalization (scaling to unit mean and variance). Fortunately, sklearn also makes this quite easy. Please review sklearn's preprocessing module options, specifically StandardScaler which corresponds to z-score normalization and MinMaxScaler. Please implement one. After your data has been scaled, scatter plot your rescaled features, and run KMeans with the transformed data. Compare the results on the transformed data with those above. End of explanation """ sample_size = 300 def bench_k_means(estimator, name, data, labels): t0 = time() estimator.fit(data) print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' % (name, (time() - t0), estimator.inertia_, metrics.homogeneity_score(labels, estimator.labels_), metrics.completeness_score(labels, estimator.labels_), metrics.v_measure_score(labels, estimator.labels_), metrics.adjusted_rand_score(labels, estimator.labels_), metrics.adjusted_mutual_info_score(labels, estimator.labels_), metrics.silhouette_score(data, estimator.labels_, metric='euclidean', sample_size=sample_size))) labels = feats[:,-1] print(82 * '_') print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette') # INSTRUCTIONS: Use the bench_k_means method to compare your clustering results # """ Explanation: 5. Quantitative Cluster Evaluation So far, we've been visually verifying our clusters. Let's use quantitative methods to verify our results. The following is a score that does not require labels: - inertia: "Sum of squared distances of samples to their closest cluster center." - Silhouette coefficient: Measures minimal inertia in ratio to distance to next nearest cluster. The score is higher are clusters become more compact and well-separated. The following scores do require labels, and are documented here. ARI, AMI measure the similarity between ground_truth labels and predicted_labels. ARI measure similarity, and AMI measures in terms of mutual information. Random assignments score close to 0, correct assignments close to 1. homogeneity: purity of the cluster (did all cluster members have the same label?). Scores in [0,1] where 0 is bad. completeness: did all labels cluster together in a single cluster? Scores in [0,1] where 0 is bad. End of explanation """ def display_stamps(candids, fig_title): # display five across num_per_row = 5 for i, candid in enumerate(candids): f_stamp = glob.glob(os.path.join(D_STAMPS, 'candid{}*.png'.format(candid)))[0] # there should only be one file returned! 
if (i % num_per_row) == 0: fig = plt.figure(figsize=(18, 3)) fig.suptitle(fig_title) ax = fig.add_subplot(1, num_per_row, i%num_per_row + 1) ax.set_axis_off() ax.set_title(candid) stamp = imread(f_stamp) imshow(stamp) return def closest_to_centroid(centroid, cluster_feats, cluster_candids): dists = euclidean_distances(cluster_feats, centroid.reshape(1, -1))[:,0] closest_indices = np.argsort(dists)[:10] return cluster_candids[closest_indices] def show_cluster_stamps(kmeans_res, displayMode='closest', num_to_display=10): # spits out a random selection of stamps from each cluster for i in range(kmeans_res.n_clusters): centroid = kmeans_res.cluster_centers_[i, :] mask = kmeans_res.labels_ == i cluster_candids = meta[mask]['candid'] cluster_feats = feats_selected_scaled[mask] if displayMode == 'near_centroid': selected_candids = closest_to_centroid(centroid, cluster_feats, cluster_candids) if displayMode == 'random': np.random.shuffle(cluster_candids) selected_candids = cluster_candids[:num_to_display] display_stamps(selected_candids, 'Cluster {}'.format(i)) # INSTRUCTION: Use the show_cluster_stamps method to display cutouts associated with each cluster. # Do you see similar objects in each cluster? # """ Explanation: 6. Cluster Evaluation by Visual Inspection This time with postage stamps! It can be tempting to let yourself be guided by metrics alone, and the metrics are useful guideposts that can help determine whether you're moving in the right direction. However, the goal of clustering is to reveal structure in your dataset. Fortunately, because the features were extracted from sources that were extracted from images, we can view the cutouts from each source to visually verify whether our clusters contain homogeneous objects. The display methods below give you an opportunity to display random candidates from each cluster, or the candidates that are closest to the cluster center. End of explanation """ featnames_to_select = ['chipsf', 'elong', 'diffmaglim', 'magpsf', 'sigmapsf', 'chipsf', 'magap', 'sigmagap', 'sky', 'magdiff', 'fwhm', 'mindtoedge', 'magfromlim', 'seeratio', 'aimage', 'bimage', 'aimagerat', 'bimagerat', 'elong', 'nneg', 'nbad', 'sumrat', 'magapbig', 'sigmagapbig'] # INSTRUCTION: Visualize these features. Discard any you consider to be problematic. # INSTRUCTION: Filter the feature space # INSTRUCTION: Run PCA on this feature space to reduce it to 2 principal components # INSTRUCTION: Run KMeans on this 2-dimensional PCA space, and evaluate your results both quantatively and qualitatively. """ Explanation: 7. Clustering in a Dimensionally-Reduced Space Given the tools seen above, starting clustering more than 2 features at a time. This work is free-form. I'll start you off with some suggested features. After plotting the feature distributions, you may choose to down-select further. Because we're now working with more than 2 features, use PCA to project the feature space onto its first two principal components. You may use the methods above to run KMeans in that reduced feature space and evaluate your results. End of explanation """
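# Added sketch (not part of the original exercise): one possible way to approach Section 7.
# It de-duplicates featnames_to_select, scales those columns, projects them onto two principal
# components, and clusters in the reduced space. Add NaN handling first if the feature matrix
# contains missing values.
from sklearn.decomposition import PCA

feat_idx = [COL_NAMES.index(name) for name in dict.fromkeys(featnames_to_select)]
feats_subset = StandardScaler().fit_transform(feats[:, feat_idx])
feats_pca = PCA(n_components=2).fit_transform(feats_subset)

kmeans_pca = runKMeans(feats_pca, n_clusters=3)
plotKMeans(kmeans_pca, feats_pca, 'PC 1', 'PC 2')
bench_k_means(KMeans(n_clusters=3, random_state=0), 'pca-3', feats_pca, labels)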
tpin3694/tpin3694.github.io
machine-learning/model_selection_using_grid_search.ipynb
mit
# Load libraries import numpy as np from sklearn import datasets from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline # Set random seed np.random.seed(0) """ Explanation: Title: Model Selection Using Grid Search Slug: model_selection_tuning_using_grid_search Summary: How to conduct grid search for model selection in scikit-learn for machine learning in Python. Date: 2017-09-18 12:00 Category: Machine Learning Tags: Model Selection Authors: Chris Albon <a alt="Model Selection Using Grid Search" href="https://machinelearningflashcards.com"> <img src="model_selection_tuning_using_grid_search/Model_Selection_print.png" class="flashcard center-block"> </a> Preliminaries End of explanation """ # Load data iris = datasets.load_iris() X = iris.data y = iris.target """ Explanation: Load Iris Dataset End of explanation """ # Create a pipeline pipe = Pipeline([('classifier', RandomForestClassifier())]) # Create space of candidate learning algorithms and their hyperparameters search_space = [{'classifier': [LogisticRegression()], 'classifier__penalty': ['l1', 'l2'], 'classifier__C': np.logspace(0, 4, 10)}, {'classifier': [RandomForestClassifier()], 'classifier__n_estimators': [10, 100, 1000], 'classifier__max_features': [1, 2, 3]}] """ Explanation: Create Pipeline With Model Selection Search Space Notice that we include both multiple possible learning algorithms and multiple possible hyperparameter values to search over. End of explanation """ # Create grid search clf = GridSearchCV(pipe, search_space, cv=5, verbose=0) """ Explanation: Create Model Selection Using Grid Search End of explanation """ # Fit grid search best_model = clf.fit(X, y) """ Explanation: Conduct Model Selection Using Grid Search End of explanation """ # View best model best_model.best_estimator_.get_params()['classifier'] """ Explanation: View Best Model And Its Best Hyperparameters End of explanation """ # Predict target vector best_model.predict(X) """ Explanation: Predict Using Best Model End of explanation """
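# Added aside (not part of the original post): besides the winning estimator, the fitted
# GridSearchCV object also exposes the best hyperparameter combination and its mean
# cross-validated score.
print(best_model.best_params_)
print(best_model.best_score_)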
marisanest/content-management
Assignment01/Assignment01.ipynb
apache-2.0
import csv
import re
import pandas as pd
from pandas import *
import numpy
from numpy import *
import math
%matplotlib inline
import matplotlib.pyplot as plt
from random import randint
"""
Explanation: Graded Assignment 01: Titanic: Machine Learning from Disaster
2017-05-17
(c) Marisa Nest 2017
Imports
End of explanation
"""
def features(passenger, train=False):
    features = {}
    features['Pclass'] = passenger['Pclass']
    features['Title'] = getTitle(passenger['Name'])
    features['Rev'] = 'Rev' in passenger['Name']
    features['Jr'] = 'Jr' in passenger['Name']
    features['Sex'] = passenger['Sex']
    features['Age'] = getAgeClass(getAge(passenger['Age']))
    features['SibSp'] = passenger['SibSp']
    features['Parch'] = passenger['Parch']
    #features['SingleTicketNumber'] = isSingleTicketNumber(passenger['Ticket'], ticket_number_df)
    features['TicketPrefix'] = getTicketPrefix(passenger['Ticket'])
    features['Fare'] = getFareClass(getFare(passenger['Fare']))
    features['Cabin'] = getCabinPrefix(passenger['Cabin'])
    features['Embarked'] = passenger['Embarked']
    if train == True:
        features['Class'] = int(passenger['Survived'])
    return features
"""
Explanation: Feature extraction
First we deal with feature extraction, i.e. taking from the dataset those properties that are decisive for whether an instance belongs to a particular class (0 or 1, i.e. did not survive or survived) or not.
Functions for feature extraction
features
The function features extracts certain properties of an instance that matter for determining which class the instance belongs to. It receives as parameters an instance (passenger) from which the properties are to be extracted, and a boolean value (train) that states whether it is a training instance or a test instance, and therefore whether the class (0 or 1, i.e. did not survive or survived) of the instance is extracted as well.
The following features are extracted:
* Pclass: ticket class
* Title: title
* Rev: whether 'Rev.' occurs in the name
* Jr: whether 'Jr' occurs in the name
* Sex: sex
* Age: age
* SibSp: number of siblings / spouses aboard the Titanic
* Parch: number of children / parents aboard the Titanic
* SingleTicketNumber: whether the ticket number was issued to only one person
* TicketPrefix: ticket prefix
* Fare: ticket price
* Cabin: cabin
* Embarked: port of embarkation
The feature SingleTicketNumber was dropped in the course of working on the assignment, because it cannot be extracted reliably: not all issued ticket numbers are known, so one cannot say for certain whether a ticket number was issued to only one person or not. It is nevertheless listed here for the sake of completeness.
End of explanation
"""
def randomFeatures(instance, random_numbers, train=False):
    features = {}
    if train == True:
        features['Class'] = int(instance['Survived'])
    for random_number in random_numbers:
        if random_number == 0:
            features['Pclass'] = instance['Pclass']
        elif random_number == 1:
            features['Title'] = getTitle(instance['Name'])
        elif random_number == 2:
            features['Rev'] = 'Rev' in instance['Name']
        elif random_number == 3:
            features['Jr'] = 'Jr' in instance['Name']
        elif random_number == 4:
            features['Sex'] = instance['Sex']
        elif random_number == 5:
            features['Age'] = getAgeClass(getAge(instance['Age']))
        elif random_number == 6:
            features['SibSp'] = instance['SibSp']
        elif random_number == 7:
            features['Parch'] = instance['Parch']
        #elif random_number == 8:
            #features['SingleTicketNumber'] = isSingleTicketNumber(passenger['Ticket'], ticket_number_df)
        elif random_number == 8:
            features['TicketPrefix'] = getTicketPrefix(instance['Ticket'])
        elif random_number == 9:
            features['Fare'] = getFareClass(getFare(instance['Fare']))
        elif random_number == 10:
            features['Cabin'] = getCabinPrefix(instance['Cabin'])
        elif random_number == 11:
            features['Embarked'] = instance['Embarked']
    return features
"""
Explanation: randomFeatures
The function randomFeatures extracts certain randomly chosen properties of an instance that matter for determining which class the instance belongs to. It receives as parameters an instance (instance) from which the properties are to be extracted, a series of random numbers (random_numbers) that decide which properties of the instance are selected, and a boolean value (train) that states whether it is a training instance or not, and therefore whether the class (0 or 1, i.e. did not survive or survived) of the instance is extracted as well.
The following features can be extracted:
* Pclass: ticket class
* Title: title
* Rev: whether 'Rev.' occurs in the name
* Jr: whether 'Jr' occurs in the name
* Sex: sex
* Age: age
* SibSp: number of siblings / spouses aboard the Titanic
* Parch: number of children / parents aboard the Titanic
* SingleTicketNumber: whether the ticket number was issued to only one person
* TicketPrefix: ticket prefix
* Fare: ticket price
* Cabin: cabin
* Embarked: port of embarkation
The feature SingleTicketNumber was dropped in the course of working on the assignment, because it cannot be extracted reliably: not all issued ticket numbers are known, so one cannot say for certain whether a ticket number was issued to only one person or not. It is nevertheless listed here for the sake of completeness.
End of explanation
"""
def getTitle(name):
    if name == '':
        return 'unknown'
    elif 'Miss' in name:
        return 'Miss'
    elif 'Mrs' in name:
        return 'Mrs'
    elif 'Master' in name:
        return 'Master'
    elif 'Mr' in name:
        return 'Mr'
    else:
        return 'none'
"""
Explanation: Helper functions for feature extraction
The following functions assist the two functions features and randomFeatures in extracting the features more cleanly.
getTitle
The function getTitle receives a string (name), checks whether the string contains a title, and returns that title. Possible titles are 'Miss', 'Mrs', 'Master' and 'Mr'. If none of these titles occurs, 'none' is returned; if the string is empty, 'unknown' is returned.
End of explanation """ def getAge(age): if age == '': return -1 else: return int(float(age)) """ Explanation: getAge Die Funktion getAge bekommt einen String (age) übergeben und prüft, ob der String leer ist. Ist dies der Fall wird -1 zurückgegeben, ansonsten wird der String erst zu einer Float-Zahl gecastet und anschließend zu einem Integer, um so eine sinnvolle Rundung vorzunehmen. End of explanation """ def getAgeClass(age): if age == -1: return 'unknown' elif age >= 0 and age <= 15: return 'child' elif age > 15 and age <= 23: return 'young person' elif age > 23 and age <= 36: return 'young adult' elif age > 36 and age <= 54: return 'adult' elif age > 54 and age <= 63: return 'old adult' elif age > 63: return 'old person' else: return 'undefined' """ Explanation: getAgeClass Die Funktion getAgeClass bekommt eine Integer-Zahl (age) übergeben und ordnet diese Zahl einer vordefinierten Klasse zu. Die Klassen sind dabei wie folgt eingeteilt: * 'unknown': age == -1 * 'child': age >= 0 and age <= 15 * 'young person': age > 15 and age <= 23 * 'young adult': age > 23 and age <= 36 * 'adult': age > 36 and age <= 54 * 'old adult': age > 54 and age <= 63 * 'old person': age > 63 * alles was keiner dieser Klassen zugeordnet werden kann ist 'undefined' Um die Klassen sinnvoll festlegen zu können, wurden als erstes alle Alters-Daten in der Rohform und nach dem Casting zum Interger graphisch visualisiert. End of explanation """ def getTicketPrefix(ticket): if ticket == '': return 'unknown' else: if ticket == 'LINE': return ticket else: ticket = ticket.upper() ticket = re.sub(r"[0-9]*$", "", ticket) ticket = re.sub(r"\.", "", ticket) ticket = re.sub(r"\s*", "", ticket) ticket = re.sub(r"\/*", "", ticket) if ticket != '': return ticket else: return 'none' """ Explanation: getTicketPrefix Die Funktion getTicketPrefix bekommt einen String (ticket) übergeben und parst aus diesem das Prefix heraus. Das Perfix definiert sich dabei als der Teil, der vor der eigentlichen Ticket-Nummer steht. Sollte der String leer sein oder nur aus der Zeichenkette 'LINE' bestehen, wird beim ersten Fall 'unknown' und beim zweiten Fall 'LINE' zurückgegeben. Ansonsten werden Sonderzeichen wie Punkte, Leerzeichen und Back-Slashes aus dem String heraus geparst. Der resultierende String wird zurückgegeben, außer er ist leer, dann wird 'none' zurückgegeben. End of explanation """ def getFare(fare): if fare == '': return -1 else: return int(float(fare)) """ Explanation: getFare Die Funktion getFare bekommt einen String (fare) übergeben und prüft, ob der String leer ist. Ist dies der Fall wird -1 zurückgegeben, ansonsten wird der String erst zu einer Float-Zahl und anschließend zu einem Integer gecastet, um so eine sinnvolle Rundung vorzunehmen. End of explanation """ def getFareClass(fare): if fare == -1: return 'unknown' elif fare >= 0 and fare <= 10: return 'cheap' elif fare > 10 and fare <= 26: return 'economical' elif fare > 26 and fare <= 50: return 'normal' elif fare > 50 and fare <= 79: return 'expensive' elif fare > 79: return 'first class' else: return 'undefined' """ Explanation: getFareClass Die Funktion getFareClass bekommt eine Integer-Zahl (fare) übergeben und ordnet diese Zahl einer vordefinierten Klasse zu. 
Die Klassen sind dabei wie folgt eingeteilt: * 'unknown': age == -1 * 'cheap': fare >= 0 and fare <= 10 * 'economical': fare > 10 and fare <= 26 * 'normal': fare > 26 and fare <= 50 * 'expensive': fare > 50 and fare <= 79 * 'first class': fare > 79 * alles was keiner dieser Klassen zugeordnet werden kann ist 'undefined' Um die Klassen sinnvoll festlegen zu können, wurden als erstes alle Fare-Daten in der Rohform und nach dem Casting zum Interger graphisch visualisiert. End of explanation """ def getCabinPrefix(cabin): if cabin == '': return 'unknown' else: cabin = re.sub(r"\s*", "", cabin) cabin = re.sub(r"[0-9]*$", "", cabin) if cabin != '': return cabin else: return 'none' """ Explanation: getCabinPrefix Die Funktion getCabinPrefix bekommt einen String (cabin) übergeben und parst aus diesem das Prefix heraus. Das Perfix definiert sich dabei als der Teil, der vor der eigentlichen Kabinien-Nummer steht. Sollte der String leer sein, wird 'unknown' zurückgegeben. Ansonsten werden alle Leerzeichen und alle Zahlen, die am Ende stehen, heraus geparst. Der resultierende String wird zurückgegeben, außer er ist leer, dann wird 'none' zurückgegeben. End of explanation """ def initTicketNumberDf(): arr = [] with open('train.csv') as csvfile: csv_dict = csv.DictReader(csvfile, delimiter=',', quotechar='"') ([arr.append(row['Ticket']) for row in csv_dict]) regex_alp = re.compile(r"[a-zA-Z]*", re.IGNORECASE) regex_pun = re.compile(r"\.", re.IGNORECASE) regex_sla = re.compile(r"\/", re.IGNORECASE) regex_num = re.compile(r"^[0-9]* ", re.IGNORECASE) regex_spa = re.compile(r"\s*", re.IGNORECASE) new_arr = [] for row in arr: tmp_row = regex_alp.sub("", row) tmp_row = regex_pun.sub("", tmp_row) tmp_row = regex_sla.sub("", tmp_row) tmp_row = regex_num.sub("", tmp_row) tmp_row = regex_spa.sub("", tmp_row) if tmp_row != '': new_arr.append(tmp_row) df = pd.DataFrame(new_arr) df = df.apply(pd.value_counts) return df """ Explanation: initTicketNumberDf Die Funktion initTicketNumberDf erzeugt ein Dataframe mit allen bekannten Ticket-Nummern (ohne Prefix, das wird zuvor raus geparst) und gibt dieses zurück. End of explanation """ def isSingleTicketNumber(ticket, ticket_number_df): if ticket == '': return 'unknown' else: ticket = re.sub(r"[a-zA-Z]*", "", ticket) ticket = re.sub(r"\.", "", ticket) ticket = re.sub(r"\/", "", ticket) ticket = re.sub(r"^[0-9]* ", "", ticket) ticket = re.sub(r"\s*", "", ticket) if ticket != '': return (int((ticket_number_df.loc[ticket, :])[0]) == 1) else: return 'none' """ Explanation: isSingleTicketNumber Die Funktion isSingleTicketNumber bekommt ein String (ticket) übergeben, ist dieser String leer, wird 'unknown' zurück gegeben, ansonsten wird die Ticket-Nummer aus dem Ticket geparst. Anschließend wird in dem ebenfalls übergebenen DataFrame ticket_number_df geguckt, ob die Nummer bereits existiert oder nicht. Jenachdem wird True oder False zurückgegeben. Sollte nach dem Parsen nur noch ein leerer String übrig sein, so wird 'none' zurückgegeben End of explanation """ train_dict = csv.DictReader(open('train.csv', 'r')) train_df = DataFrame([features(p, True) for p in train_dict]) survived = train_df.query('Class==1') not_survived = train_df.query('Class==0') """ Explanation: Plots zur Visualisierung der extrahierten Features Die folgeneden Plots sind dazu da, um sich ein Bild darüber zu machen, wie entscheidend ein bestimmtes Feature ist, wenn es darum geht, ob ein Passagier überlebt hat oder nicht. 
Bei der Bearbeitung der Aufgabe, wurd bei der Feature-Extraktion verschiedenes ausprobiert und anschließend durch die Plots geprüft. Die Plots sind dabei so aufgebaut, dass an der x-Achse alle Ausprägungen eines Features zu finden sind. Für jede Ausprägung eines Features gibt es dann jeweils zwei Balken: Der blaue Balken für die Pasagiere, die die Auspägung hatten und nicht überlebt haben und der orange Balken für die Pasagiere, die die Auspägung hatten und überlebt haben. Die y-Achse gibt an, wie viele Pasagiere mit einer bestimmten Ausprägung jeweils überlebt oder nicht überlebt haben. Vorbereitung der beiden Datensätze survived und not_survived, welche einemal alle überlebenden Passagiere und einmal alle nicht überlebenden Passagiere beinhalten. End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Pclass']).size()), "NotSurvived": Series(not_survived.groupby(['Pclass']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Pclass-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Title']).size()), "NotSurvived": Series(not_survived.groupby(['Title']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Name Title-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Rev']).size()), "NotSurvived": Series(not_survived.groupby(['Rev']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Rev-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Jr']).size()), "NotSurvived": Series(not_survived.groupby(['Jr']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Jr-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Sex']).size()), "NotSurvived": Series(not_survived.groupby(['Sex']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Sex-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Age']).size()), "NotSurvived": Series(not_survived.groupby(['Age']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Age-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['SibSp']).size()), "NotSurvived": Series(not_survived.groupby(['SibSp']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: SibSp-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Parch']).size()), "NotSurvived": Series(not_survived.groupby(['Parch']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Parch-Balken-Diagramm End of explanation """ #x = DataFrame({"Survived": Series(survived.groupby(['SingleTicketNumber']).size()), "NotSurvived": Series(not_survived.groupby(['SingleTicketNumber']).size())}) #ax = x.plot(kind='bar') #patches, labels = ax.get_legend_handles_labels() #ax.legend(patches, labels, loc='best') """ Explanation: Ticket 
SingleTicketNumber-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['TicketPrefix']).size()), "NotSurvived": Series(not_survived.groupby(['TicketPrefix']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: TicketPrefix-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Fare']).size()), "NotSurvived": Series(not_survived.groupby(['Fare']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Fare-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Cabin']).size()), "NotSurvived": Series(not_survived.groupby(['Cabin']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Cabin-Balken-Diagramm End of explanation """ x = DataFrame({"Survived": Series(survived.groupby(['Embarked']).size()), "NotSurvived": Series(not_survived.groupby(['Embarked']).size())}) ax = x.plot(kind='bar') patches, labels = ax.get_legend_handles_labels() ax.legend(patches, labels, loc='best') """ Explanation: Embarked-Balken-Diagramm End of explanation """ class NaiveBayesClassifier: def __init__(self): print('New Naive Bayes Classifier initalized...') def train(self, train_set): self.train_set = DataFrame(train_set) self.classes = np.unique(self.train_set['Class'].values.ravel()) self.all_instances_with_class = {} self.probability_of_classes = {} for class_ in self.classes: self.all_instances_with_class[class_] = self.train_set.groupby(['Class']).size().loc[class_] self.probability_of_classes[class_] = self.all_instances_with_class[class_] / self.train_set.shape[0] self.number_of_different_forms_of_features = {} for feature in self.train_set.keys(): self.number_of_different_forms_of_features[feature] = self.train_set.groupby([feature]).size().count() def classify(self, instance): probabilities_of_classes_provided_by_features = {} for class_ in self.classes: # Frage: Sollte nicht auch von self.probability_of_classes[class_] der Logarithmus genommen werden? 
            probability_of_class_provided_by_features = log(self.probability_of_classes[class_])
            probability_of_features_provided_by_class = 0
            for feature in instance:
                if feature != 'Class' :
                    try:
                        all_instances_with_class_and_feature = self.train_set.groupby(['Class', feature]).size().loc[class_].loc[instance[feature]]
                    except KeyError:
                        all_instances_with_class_and_feature = 0
                    probability_of_features_provided_by_class += log((all_instances_with_class_and_feature + 1) / (self.all_instances_with_class[class_] + self.number_of_different_forms_of_features[feature]))
            probability_of_class_provided_by_features += probability_of_features_provided_by_class
            probabilities_of_classes_provided_by_features[class_] = probability_of_class_provided_by_features
        best_class = max(probabilities_of_classes_provided_by_features, key=probabilities_of_classes_provided_by_features.get)
        return best_class

    def accuracy(self, test_set):
        tp_tn = 0
        for instance in test_set:
            result = self.classify(instance)
            if result == instance['Class']:
                tp_tn += 1
        return tp_tn / DataFrame(test_set).shape[0]

    def recall(self, test_set):
        tp = 0
        for instance in test_set:
            result = self.classify(instance)
            if result == instance['Class'] and instance['Class'] == 1:
                tp += 1
        return tp / DataFrame(test_set).groupby(['Class']).size().loc[1]

    def fpRate(self, test_set):
        fp = 0
        for instance in test_set:
            result = self.classify(instance)
            if result != instance['Class'] and instance['Class'] == 0:
                fp += 1
        return fp / DataFrame(test_set).groupby(['Class']).size().loc[0]

    def precision(self, test_set):
        tp = 0
        fp = 0
        for instance in test_set:
            result = self.classify(instance)
            if result == instance['Class'] and instance['Class'] == 1:
                tp += 1
            elif result != instance['Class'] and instance['Class'] == 0:
                fp += 1
        return tp / (tp + fp)

    def f1Score(self, test_set):
        precision = self.precision(test_set)
        recall = self.recall(test_set)
        return (2 * precision * recall) / (precision + recall)
"""
Explanation: Classification
In the following, a naive Bayes classifier is implemented that can be trained on a training dataset and is then able to classify unseen data. Bayes' theorem is used as the underlying algorithm.
Bayes' theorem
Naive Bayes classifiers are widely used in machine learning, where they belong to the family of rather simple probabilistic classifiers. The classifier is built on Bayes' theorem, which can be written as the following formula:
$$p(c|x) = \frac{p(x|c) * p(c)}{p(x)}$$
where p(x|c) is computed as:
$$p(x|c) = \prod_{i=1}^{n} p(x_i|c)$$
Note that the individual features (x_i in the formula) are assumed to be independent of each other.
Naive Bayes classifier
Below, a class is implemented that represents a naive Bayes classifier. It uses the formula mentioned above, with smoothing added and the computation carried out with logarithms in order to avoid values that are too small and hence a floating-point underflow. Since the division by p(x) can also be dropped, because it affects all computations equally, the following formula results:
$$p(c|x) = p(x|c) * p(c)$$
where p(x|c) is computed as:
$$p(x|c) = \sum_{i=1}^{n} log(p(x_i|c))$$
and where p(x_i|c) is computed as:
$$p(x_i|c) = \frac{p(x_i \bigwedge c) + 1}{p(c) + m}$$
This formula is implemented in the classifier exactly as shown.
The NaiveBayesClassifier class contains the following methods:
1. init: only prints a console message to show that an instance has been created
2. train: trains the classifier. The supplied training dataset is stored and several values are precomputed. The precomputed values are: an array of all distinct classes, the number of instances belonging to each class, the probability that an instance has a given class, and, for each distinct feature, the number of distinct values that feature can take
3. classify: classifies instances using Bayes' theorem as presented above. The return value is the class with the highest probability
4. accuracy: computes the accuracy of the classifier
5. recall: computes the recall of the classifier
6. fpRate: computes the false-positive rate of the classifier
7. precision: computes the precision of the classifier
8. f1Score: computes the F1 score of the classifier
End of explanation
"""
train_dict = csv.DictReader(open('train.csv', 'r'))
train_set = ([features(p, True) for p in train_dict])

size = int(len(train_set) * 0.12)
train_set, dev_set = train_set[size:], train_set[:size]
"""
Explanation: The classifier combined with the feature extraction method features
Training and testing
1.1. Preparing the datasets
First the training and development datasets are prepared. The training dataset should contain data different from the development dataset, so that we can also judge whether the classifier works well. The development dataset should contain at least 100 instances.
End of explanation
"""
nbc = NaiveBayesClassifier()
nbc.train(train_set)
"""
Explanation: 1.2. Training the classifier
Next, the classifier is trained with the training dataset.
End of explanation
"""
print('Accuracy: ', nbc.accuracy(dev_set))
print('Recall: ', nbc.recall(dev_set))
print('FP-Rate: ', nbc.fpRate(dev_set))
print('Precision: ', nbc.precision(dev_set))
print('F1-Score: ', nbc.f1Score(dev_set))
"""
Explanation: 1.3. Testing the classifier
Finally, the classifier is evaluated on the development dataset with the following methods:
* accuracy
* recall
* fpRate
* precision
* f1Score
End of explanation
"""
train_dict = csv.DictReader(open('train.csv', 'r'))
train_set = ([features(p, True) for p in train_dict])

test_dict = csv.DictReader(open('test.csv', 'r'))
"""
Explanation: 2. Classifying unknown data
2.1. Preparing the datasets
First the training and test datasets are prepared. This time the training dataset is used in full (without an additional development dataset) to train the classifier. The test dataset contains the unseen data that is to be classified.
End of explanation
"""
nbc = NaiveBayesClassifier()
nbc.train(train_set)
"""
Explanation: 2.2. Training the classifier
Next, the classifier is trained with the training dataset.
End of explanation
"""
with open('submission.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['PassengerId', 'Survived'])

    for passanger in test_dict:
        passanger_features = features(passanger, False)
        class_ = nbc.classify(passanger_features)
        writer.writerow([passanger['PassengerId'], str(class_)])
"""
Explanation: 2.3. Classifying the unseen data
Finally, all instances of the test dataset are classified and the result is written to a CSV file.
End of explanation
"""
selected_classes_for_instances_from_all_classifiers = []

for i in range(100):
    random_number = randint(3,10)
    random_numbers = []

    for j in range(random_number):
        tmp_random_number = randint(0,11)
        random_numbers.append(tmp_random_number)

    train_dict = csv.DictReader(open('train.csv', 'r'))
    train_set = ([randomFeatures(p, random_numbers, True) for p in train_dict])

    size = int(len(train_set) * 0.12)
    train_set, dev_set = train_set[size:], train_set[:size]

    nbc = NaiveBayesClassifier()
    nbc.train(train_set)

    accuracy = nbc.accuracy(dev_set)

    print('With accuracy: ', accuracy)
    print('With features:', DataFrame(train_set).keys())

    if accuracy > 0.7:
        train_dict = csv.DictReader(open('train.csv', 'r'))
        train_set = ([randomFeatures(p, random_numbers, True) for p in train_dict])

        nbc = NaiveBayesClassifier()
        nbc.train(train_set)

        test_dict = csv.DictReader(open('test.csv', 'r'))

        for instance in test_dict:
            instance_features = randomFeatures(instance, random_numbers, False)
            selected_classes_for_instances_from_all_classifiers.append({'PassengerId': instance['PassengerId'], 'Class': nbc.classify(instance_features)})

if len(selected_classes_for_instances_from_all_classifiers) > 0:
    grouped_and_counted_classes_for_instances = DataFrame(selected_classes_for_instances_from_all_classifiers).groupby(['PassengerId', 'Class']).size()
"""
Explanation: The classifier combined with the feature extraction method randomFeatures
Training and classifying via ensemble classification
Since it would not be very effective to use the feature extraction method randomFeatures for just a single classification, and it would probably lead to worse results than the method features in many cases, a kind of ensemble classifier is implemented here. Several classifiers are used, each of which assigns an instance to a class based on a different set of features. If a classifier reaches an accuracy higher than 0.7 (0.5 would actually be enough, but a higher value was chosen here), its result, i.e. the class, is counted towards the final result. In the end, we tally how often each class was selected, and the class that occurs most often is taken as the final result.
End of explanation
"""
if len(selected_classes_for_instances_from_all_classifiers) > 0:
    with open('submission_random_features.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['PassengerId', 'Survived'])

        test_dict = csv.DictReader(open('test.csv', 'r'))

        for instance in test_dict:
            try:
                not_survived = grouped_and_counted_classes_for_instances.loc[instance['PassengerId'], 0]
            except Exception:
                not_survived = 0
            try:
                survived = grouped_and_counted_classes_for_instances.loc[instance['PassengerId'], 1]
            except Exception:
                survived = 0

            if not_survived > survived:
                writer.writerow([instance['PassengerId'], str(0)])
            else:
                writer.writerow([instance['PassengerId'], str(1)])
"""
Explanation: 2. Saving the result
Finally, the result is saved in the correct format to a CSV file.
End of explanation
"""
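# Optional sketch, not part of the original assignment: the ensemble above is only
# evaluated classifier by classifier. A quick way to sanity-check the majority vote
# itself would be to collect, for each instance of a held-out split, the true class
# and the individual classifiers' votes, and compare the voted class against the
# truth. The helpers below assume such records as dicts with a 'Class' key and a
# list of votes under 'votes'; this record structure is an assumption, not something
# built earlier in the notebook.
from collections import Counter

def majority_vote(votes, default=1):
    # returns the most frequent class among the individual classifier outputs;
    # falls back to `default` when no classifier voted (mirrors the submission code)
    if not votes:
        return default
    return Counter(votes).most_common(1)[0][0]

def ensemble_accuracy(records):
    # records: iterable of dicts with the true 'Class' and a list of 'votes'
    records = list(records)
    correct = sum(1 for r in records if majority_vote(r['votes']) == r['Class'])
    return correct / len(records)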
tensorflow/docs
site/en/tutorials/images/classification_with_model_garden.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ !pip uninstall -y opencv-python !pip install -U -q "tensorflow>=2.9.0" "tf-models-official" """ Explanation: Image classification with Model Garden <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/images/classification_with_model_garden"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/classification_with_model_garden.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/classification_with_model_garden.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/images/classification_with_model_garden.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial fine-tunes a Residual Network (ResNet) from the TensorFlow Model Garden package (tensorflow-models) to classify images in the CIFAR dataset. Model Garden contains a collection of state-of-the-art vision models, implemented with TensorFlow's high-level APIs. The implementations demonstrate the best practices for modeling, letting users to take full advantage of TensorFlow for their research and product development. This tutorial uses a ResNet model, a state-of-the-art image classifier. This tutorial uses the ResNet-18 model, a convolutional neural network with 18 layers. This tutorial demonstrates how to: 1. Use models from the TensorFlow Models package. 2. Fine-tune a pre-built ResNet for image classification. 3. Export the tuned ResNet model. Setup Install and import the necessary modules. This tutorial uses the tf-models-nightly version of Model Garden. Note: Upgrading TensorFlow to 2.9 in Colab breaks GPU support, so this colab is set to run on CPU until the Colab runtimes are updated. End of explanation """ import pprint import tempfile from IPython import display import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds """ Explanation: Import TensorFlow, TensorFlow Datasets, and a few helper libraries. End of explanation """ import tensorflow_models as tfm # These are not in the tfm public API for v2.9. They will be available in v2.10 from official.vision.serving import export_saved_model_lib import official.core.train_lib """ Explanation: The tensorflow_models package contains the ResNet vision model, and the official.vision.serving model contains the function to save and export the tuned model. 
End of explanation """ exp_config = tfm.core.exp_factory.get_exp_config('resnet_imagenet') tfds_name = 'cifar10' ds_info = tfds.builder(tfds_name ).info ds_info """ Explanation: Configure the ResNet-18 model for the Cifar-10 dataset The CIFAR10 dataset contains 60,000 color images in mutually exclusive 10 classes, with 6,000 images in each class. In Model Garden, the collections of parameters that define a model are called configs. Model Garden can create a config based on a known set of parameters via a factory. Use the resnet_imagenet factory configuration, as defined by tfm.vision.configs.image_classification.image_classification_imagenet. The configuration is set up to train ResNet to converge on ImageNet. End of explanation """ # Configure model exp_config.task.model.num_classes = 10 exp_config.task.model.input_size = list(ds_info.features["image"].shape) exp_config.task.model.backbone.resnet.model_id = 18 # Configure training and testing data batch_size = 128 exp_config.task.train_data.input_path = '' exp_config.task.train_data.tfds_name = tfds_name exp_config.task.train_data.tfds_split = 'train' exp_config.task.train_data.global_batch_size = batch_size exp_config.task.validation_data.input_path = '' exp_config.task.validation_data.tfds_name = tfds_name exp_config.task.validation_data.tfds_split = 'test' exp_config.task.validation_data.global_batch_size = batch_size """ Explanation: Adjust the model and dataset configurations so that it works with Cifar-10 (cifar10). End of explanation """ logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()] if 'GPU' in ''.join(logical_device_names): print('This may be broken in Colab.') device = 'GPU' elif 'TPU' in ''.join(logical_device_names): print('This may be broken in Colab.') device = 'TPU' else: print('Running on CPU is slow, so only train for a few steps.') device = 'CPU' if device=='CPU': train_steps = 20 exp_config.trainer.steps_per_loop = 5 else: train_steps=5000 exp_config.trainer.steps_per_loop = 100 exp_config.trainer.summary_interval = 100 exp_config.trainer.checkpoint_interval = train_steps exp_config.trainer.validation_interval = 1000 exp_config.trainer.validation_steps = ds_info.splits['test'].num_examples // batch_size exp_config.trainer.train_steps = train_steps exp_config.trainer.optimizer_config.learning_rate.type = 'cosine' exp_config.trainer.optimizer_config.learning_rate.cosine.decay_steps = train_steps exp_config.trainer.optimizer_config.learning_rate.cosine.initial_learning_rate = 0.1 exp_config.trainer.optimizer_config.warmup.linear.warmup_steps = 100 """ Explanation: Adjust the trainer configuration. End of explanation """ pprint.pprint(exp_config.as_dict()) display.Javascript("google.colab.output.setIframeHeight('300px');") """ Explanation: Print the modified configuration. 
End of explanation """ logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()] if exp_config.runtime.mixed_precision_dtype == tf.float16: tf.keras.mixed_precision.set_global_policy('mixed_float16') if 'GPU' in ''.join(logical_device_names): distribution_strategy = tf.distribute.MirroredStrategy() elif 'TPU' in ''.join(logical_device_names): tf.tpu.experimental.initialize_tpu_system() tpu = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='/device:TPU_SYSTEM:0') distribution_strategy = tf.distribute.experimental.TPUStrategy(tpu) else: print('Warning: this will be really slow.') distribution_strategy = tf.distribute.OneDeviceStrategy(logical_device_names[0]) """ Explanation: Set up the distribution strategy. End of explanation """ with distribution_strategy.scope(): model_dir = tempfile.mkdtemp() task = tfm.core.task_factory.get_task(exp_config.task, logging_dir=model_dir) tf.keras.utils.plot_model(task.build_model(), show_shapes=True) for images, labels in task.build_inputs(exp_config.task.train_data).take(1): print() print(f'images.shape: {str(images.shape):16} images.dtype: {images.dtype!r}') print(f'labels.shape: {str(labels.shape):16} labels.dtype: {labels.dtype!r}') """ Explanation: Create the Task object (tfm.core.base_task.Task) from the config_definitions.TaskConfig. The Task object has all the methods necessary for building the dataset, building the model, and running training & evaluation. These methods are driven by tfm.core.train_lib.run_experiment. End of explanation """ plt.hist(images.numpy().flatten()); """ Explanation: Visualize the training data The dataloader applies a z-score normalization using preprocess_ops.normalize_image(image, offset=MEAN_RGB, scale=STDDEV_RGB), so the images returned by the dataset can't be directly displayed by standard tools. The visualization code needs to rescale the data into the [0,1] range. End of explanation """ label_info = ds_info.features['label'] label_info.int2str(1) """ Explanation: Use ds_info (which is an instance of tfds.core.DatasetInfo) to lookup the text descriptions of each class ID. End of explanation """ def show_batch(images, labels, predictions=None): plt.figure(figsize=(10, 10)) min = images.numpy().min() max = images.numpy().max() delta = max - min for i in range(12): plt.subplot(6, 6, i + 1) plt.imshow((images[i]-min) / delta) if predictions is None: plt.title(label_info.int2str(labels[i])) else: if labels[i] == predictions[i]: color = 'g' else: color = 'r' plt.title(label_info.int2str(predictions[i]), color=color) plt.axis("off") plt.figure(figsize=(10, 10)) for images, labels in task.build_inputs(exp_config.task.train_data).take(1): show_batch(images, labels) """ Explanation: Visualize a batch of the data. End of explanation """ plt.figure(figsize=(10, 10)); for images, labels in task.build_inputs(exp_config.task.validation_data).take(1): show_batch(images, labels) """ Explanation: Visualize the testing data Visualize a batch of images from the validation dataset. End of explanation """ model, eval_logs = tfm.core.train_lib.run_experiment( distribution_strategy=distribution_strategy, task=task, mode='train_and_eval', params=exp_config, model_dir=model_dir, run_post_eval=True) tf.keras.utils.plot_model(model, show_shapes=True) """ Explanation: Train and evaluate End of explanation """ for key, value in eval_logs.items(): print(f'{key:20}: {value.numpy():.3f}') """ Explanation: Print the accuracy, top_5_accuracy, and validation_loss evaluation metrics. 
End of explanation """ for images, labels in task.build_inputs(exp_config.task.train_data).take(1): predictions = model.predict(images) predictions = tf.argmax(predictions, axis=-1) show_batch(images, labels, tf.cast(predictions, tf.int32)) if device=='CPU': plt.suptitle('The model was only trained for a few steps, it is not expected to do well.') """ Explanation: Run a batch of the processed training data through the model, and view the results End of explanation """ # Saving and exporting the trained model export_saved_model_lib.export_inference_graph( input_type='image_tensor', batch_size=1, input_image_size=[32, 32], params=exp_config, checkpoint_path=tf.train.latest_checkpoint(model_dir), export_dir='./export/') """ Explanation: Export a SavedModel The keras.Model object returned by train_lib.run_experiment expects the data to be normalized by the dataset loader using the same mean and variance statiscics in preprocess_ops.normalize_image(image, offset=MEAN_RGB, scale=STDDEV_RGB). This export function handles those details, so you can pass tf.uint8 images and get the correct results. End of explanation """ # Importing SavedModel imported = tf.saved_model.load('./export/') model_fn = imported.signatures['serving_default'] """ Explanation: Test the exported model. End of explanation """ plt.figure(figsize=(10, 10)) for data in tfds.load('cifar10', split='test').batch(12).take(1): predictions = [] for image in data['image']: index = tf.argmax(model_fn(image[tf.newaxis, ...])['logits'], axis=1)[0] predictions.append(index) show_batch(data['image'], data['label'], predictions) if device=='CPU': plt.suptitle('The model was only trained for a few steps, it is not expected to do better than random.') """ Explanation: Visualize the predictions. End of explanation """
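# Optional convenience wrapper, not part of the original tutorial: classify a single
# uint8 CIFAR-10 image with the re-loaded SavedModel and return the human-readable
# class name. It reuses `model_fn` and `label_info` defined in the cells above.
def classify_one(image):
    logits = model_fn(image[tf.newaxis, ...])['logits']
    return label_info.int2str(int(tf.argmax(logits, axis=1)[0]))

for data in tfds.load('cifar10', split='test').batch(1).take(1):
    print(classify_one(data['image'][0]))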
tpin3694/tpin3694.github.io
machine-learning/select_best_number_of_components_in_lda.ipynb
mit
# Load libraries
from sklearn import datasets
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
"""
Explanation: Title: Selecting The Best Number Of Components For LDA
Slug: select_best_number_of_components_in_lda
Summary: How to select the best number of components for linear discriminant analysis for dimensionality reduction using Python.
Date: 2017-09-13 12:00
Category: Machine Learning
Tags: Feature Engineering
Authors: Chris Albon
In scikit-learn, LDA is implemented using LinearDiscriminantAnalysis, which includes a parameter, n_components, indicating the number of features we want returned. To figure out what argument value to use with n_components (e.g. how many parameters to keep), we can take advantage of the fact that explained_variance_ratio_ tells us the variance explained by each outputted feature and is a sorted array.
Specifically, we can run LinearDiscriminantAnalysis with n_components set to None to return the ratio of variance explained by every component feature, then calculate how many components are required to get above some threshold of variance explained (often 0.95 or 0.99).
Preliminaries
End of explanation
"""
# Load the Iris flower dataset:
iris = datasets.load_iris()
X = iris.data
y = iris.target
"""
Explanation: Load Iris Data
End of explanation
"""
# Create and run an LDA
lda = LinearDiscriminantAnalysis(n_components=None)
X_lda = lda.fit(X, y)
"""
Explanation: Run Linear Discriminant Analysis
End of explanation
"""
# Create array of explained variance ratios
lda_var_ratios = lda.explained_variance_ratio_
"""
Explanation: Create List Of Explained Variances
End of explanation
"""
# Create a function
def select_n_components(var_ratio, goal_var: float) -> int:
    # Set initial variance explained so far
    total_variance = 0.0

    # Set initial number of features
    n_components = 0

    # For the explained variance of each feature:
    for explained_variance in var_ratio:

        # Add the explained variance to the total
        total_variance += explained_variance

        # Add one to the number of components
        n_components += 1

        # If we reach our goal level of explained variance
        if total_variance >= goal_var:
            # End the loop
            break

    # Return the number of components
    return n_components
"""
Explanation: Create Function Calculating Number Of Components Required To Pass Threshold
End of explanation
"""
# Run function
select_n_components(lda_var_ratios, 0.95)
"""
Explanation: Run Function
End of explanation
"""
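# Optional follow-up, a sketch rather than part of the original recipe: once the
# number of components is known, refit the LDA with that value and transform the data.
n_components = select_n_components(lda_var_ratios, 0.95)
X_lda_reduced = LinearDiscriminantAnalysis(n_components=n_components).fit(X, y).transform(X)

# The reduced feature matrix now has `n_components` columns
X_lda_reduced.shape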
davofis/computational_seismology
07_spectral_elements/se_homo_1d_solution.ipynb
gpl-3.0
# Import all necessary libraries, this is a configuration step for the exercise. # Please run it before the simulation code! import numpy as np import matplotlib.pyplot as plt from gll import gll from lagrange1st import lagrange1st from ricker import ricker # Show the plots in the Notebook. plt.switch_backend("nbagg") """ Explanation: <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'> <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px"> <div style="position: relative ; top: 50% ; transform: translatey(-50%)"> <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div> <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Spectral Element Method - 1D Elastic Wave Equation</div> </div> </div> </div> Seismo-Live: http://seismo-live.org Authors: David Vargas (@dvargas) Heiner Igel (@heinerigel) Basic Equations This notebook presents the numerical solution for the 1D elastic wave equation \begin{equation} \rho(x) \partial_t^2 u(x,t) = \partial_x (\mu(x) \partial_x u(x,t)) + f(x,t), \end{equation} using the spectral element method. This is done after a series of steps summarized as follow: 1) The wave equation is written into its Weak form 2) Apply stress Free Boundary Condition after integration by parts 3) Approximate the wave field as a linear combination of some basis \begin{equation} u(x,t) \ \approx \ \overline{u}(x,t) \ = \ \sum_{i=1}^{n} u_i(t) \ \varphi_i(x) \end{equation} 4) Use the same basis functions in $u(x, t)$ as test functions in the weak form, the so call Galerkin principle. 6) The continuous weak form is written as a system of linear equations by considering the approximated displacement field. \begin{equation} \mathbf{M}^T\partial_t^2 \mathbf{u} + \mathbf{K}^T\mathbf{u} = \mathbf{f} \end{equation} 7) Time extrapolation with centered finite differences scheme \begin{equation} \mathbf{u}(t + dt) = dt^2 (\mathbf{M}^T)^{-1}[\mathbf{f} - \mathbf{K}^T\mathbf{u}] + 2\mathbf{u} - \mathbf{u}(t-dt). \end{equation} where $\mathbf{M}$ is known as the mass matrix, and $\mathbf{K}$ the stiffness matrix. The above solution is exactly the same presented for the classic finite-element method. Now we introduce appropriated basis functions and integration scheme to efficiently solve the system of matrices. Interpolation with Lagrange Polynomials At the elemental level (see section 7.4), we introduce as interpolating functions the Lagrange polynomials and use $\xi$ as the space variable representing our elemental domain: \begin{equation} \varphi_i \ \rightarrow \ \ell_i^{(N)} (\xi) \ := \ \prod_{j \neq i}^{N+1} \frac{\xi - \xi_j}{\xi_i-\xi_j}, \qquad i,j = 1, 2, \dotsc , N + 1 \end{equation} Numerical Integration The integral of a continuous function $f(x)$ can be calculated after replacing $f(x)$ by a polynomial approximation that can be integrated analytically. As interpolating functions we use again the Lagrange polynomials and obtain Gauss-Lobatto-Legendre quadrature. Here, the GLL points are used to perform the integral. \begin{equation} \int_{-1}^1 f(x) \ dx \approx \int {-1}^1 P_N(x) dx = \sum{i=1}^{N+1} w_i f(x_i) \end{equation} End of explanation """ # Initialization of setup # --------------------------------------------------------------- nt = 10000 # number of time steps xmax = 10000. 
# Length of domain [m] vs = 2500. # S velocity [m/s] rho = 2000 # Density [kg/m^3] mu = rho * vs**2 # Shear modulus mu N = 3 # Order of Lagrange polynomials ne = 250 # Number of elements Tdom = .2 # Dominant period of Ricker source wavelet iplot = 20 # Plotting each iplot snapshot # variables for elemental matrices Me = np.zeros(N+1, dtype = float) Ke = np.zeros((N+1, N+1), dtype = float) # ---------------------------------------------------------------- # Initialization of GLL points integration weights [xi, w] = gll(N) # xi, N+1 coordinates [-1 1] of GLL points # w Integration weights at GLL locations # Space domain le = xmax/ne # Length of elements # Vector with GLL points k = 0 xg = np.zeros((N*ne)+1) xg[k] = 0 for i in range(1,ne+1): for j in range(0,N): k = k+1 xg[k] = (i-1)*le + .5*(xi[j+1]+1)*le # --------------------------------------------------------------- dxmin = min(np.diff(xg)) eps = 0.1 # Courant value dt = eps*dxmin/vs # Global time step # Mapping - Jacobian J = le/2 Ji = 1/J # Inverse Jacobian # 1st derivative of Lagrange polynomials l1d = lagrange1st(N) # Array with GLL as columns for each N+1 polynomial """ Explanation: 1. Initialization of setup End of explanation """ # Elemental Mass matrix # --------------------------------------------------------------- for i in range(0, N+1): Me[i] = rho * w[i] * J #stored as a vector since it's diagonal # Global Mass matrix # --------------------------------------------------------------- k = -1 ng = (ne-1)*N + N + 1 M = np.zeros(2*ng) for i in range(1, ne+1): for j in range(0, N+1): k = k + 1 if i>1: if j==0: k = k - 1 M[k] = M[k] + Me[j] # Inverse matrix of M # --------------------------------------------------------------- Minv = np.identity(ng) for i in range(0,ng): Minv[i,i] = 1./M[i] # --------------------------------------------------------------- # Display inverse mass matrix inv(M) # --------------------------------------------------------------- plt.imshow(Minv) plt.title('Mass Matrix $\mathbf{M}$') plt.axis("off") plt.tight_layout() plt.show() """ Explanation: 2. The Mass Matrix Now we initialize the mass and stiffness matrices. In general, the mass matrix at the elemental level is given \begin{equation} M_{ji}^e \ = \ w_j \ \rho (\xi) \ \frac{\mathrm{d}x}{\mathrm{d}\xi} \delta_{ij} \vert_ {\xi = \xi_j} \end{equation} Exercise 1 Implements the mass matrix using the integration weights at GLL locations $w$, the jacobian $J$, and density $\rho$. Then, perform the global assembly of the mass matrix, compute its inverse, and display the inverse mass matrix to visually inspect how it looks like. End of explanation """ # Elemental Stiffness Matrix # --------------------------------------------------------------- for i in range(0, N+1): for j in range(0, N+1): for k in range(0, N+1): Ke[i,j] = Ke[i,j] + mu*w[k]*Ji**2 *J*l1d[i,k]*l1d[j,k] # Global Stiffness Matrix # --------------------------------------------------------------- K = np.zeros([ng, ng]) # Values except at element boundaries for k in range(1,ne+1): i0 = (k-1)*N + 1 j0 = i0 for i in range(-1,N): for j in range(-1,N): K[i0+i,j0+j] = Ke[i+1,j+1] # Values at element boundaries for k in range(2,ne+1): i0 = (k - 1)*N j0 = i0 K[i0,j0] = Ke[0,0] + Ke[N,N] # --------------------------------------------------------------- # Display stiffness matrix K # --------------------------------------------------------------- plt.figure() plt.imshow(K) plt.title('Stiffness Matrix $\mathbf{K}$') plt.axis("off") plt.tight_layout() plt.show() """ Explanation: 3. 
The Stiffness matrix On the other hand, the general form of the stiffness matrix at the elemtal level is \begin{equation} K_{ji}^e \ = \ \sum_{k = 1}^{N+1} w_k \mu (\xi) \partial_\xi \ell_j (\xi) \partial_\xi \ell_i (\xi) \left(\frac{\mathrm{d}\xi}{\mathrm{d}x} \right)^2 \frac{\mathrm{d}x}{\mathrm{d}\xi} \vert_{\xi = \xi_k} \end{equation} Exercise 2 Implements the stiffness matrix using the integration weights at GLL locations $w$, the jacobian $J$, and shear stress $\mu$. Then, perform the global assembly of the mass matrix and display the matrix to visually inspect how it looks like. End of explanation """ # SE Solution, Time extrapolation # --------------------------------------------------------------- # initialize source time function and force vector f src = ricker(dt,Tdom) isrc = int(np.floor(ng/2)) # Source location # Initialization of solution vectors u = np.zeros(ng) uold = u unew = u f = u # Initialize animated plot # --------------------------------------------------------------- plt.figure(figsize=(10,6)) lines = plt.plot(xg, u, lw=1.5) plt.title('SEM 1D Animation', size=16) plt.xlabel(' x (m)') plt.ylabel(' Amplitude ') plt.ion() # set interective mode plt.show() # --------------------------------------------------------------- # Time extrapolation # --------------------------------------------------------------- for it in range(nt): # Source initialization f= np.zeros(ng) if it < len(src): f[isrc-1] = src[it-1] # Time extrapolation unew = dt**2 * Minv @ (f - K @ u) + 2 * u - uold uold, u = u, unew # -------------------------------------- # Animation plot. Display solution if not it % iplot: for l in lines: l.remove() del l # -------------------------------------- # Display lines lines = plt.plot(xg, u, color="black", lw = 1.5) plt.gcf().canvas.draw() """ Explanation: 4. Finite element solution Finally we implement the spectral element solution using the computed mass $M$ and stiffness $K$ matrices together with a finite differences extrapolation scheme \begin{equation} \mathbf{u}(t + dt) = dt^2 (\mathbf{M}^T)^{-1}[\mathbf{f} - \mathbf{K}^T\mathbf{u}] + 2\mathbf{u} - \mathbf{u}(t-dt). \end{equation} End of explanation """
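# Optional extension, a sketch that is not part of the original exercise: rerun the
# same time extrapolation without plotting and record a seismogram at a receiver
# placed at three quarters of the domain. It reuses Minv, K, src, isrc, dt and xg
# exactly as defined above.
ir = np.argmin(np.abs(xg - 0.75 * xmax))     # receiver GLL point
seis = np.zeros(nt)                          # seismogram at the receiver
u = np.zeros(ng)
uold = np.zeros(ng)

for it in range(nt):
    f = np.zeros(ng)
    if it < len(src):
        f[isrc-1] = src[it-1]
    unew = dt**2 * Minv @ (f - K @ u) + 2 * u - uold
    uold, u = u, unew
    seis[it] = u[ir]

plt.figure(figsize=(10, 4))
plt.plot(np.arange(nt) * dt, seis, color="black", lw=1.5)
plt.xlabel(' Time (s)')
plt.ylabel(' Displacement at receiver ')
plt.show()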
PDBeurope/PDBe_Programming
search_interface/notebooks/search_facets.ipynb
apache-2.0
from mysolr import Solr PDBE_SOLR_URL = "http://wwwdev.ebi.ac.uk/pdbe/search/pdb" solr = Solr(PDBE_SOLR_URL, version=4) UNLIMITED_ROWS = 10000000 # necessary because default in mysolr is mere 10 import logging, sys #reload(logging) # reload is just a hack to make logging work in the notebook, it's usually unnecessary logging.basicConfig( level=logging.INFO, stream=sys.stdout, format='LOG|%(asctime)s|%(levelname)s %(message)s', datefmt='%d-%b-%Y %H:%M:%S' ) logging.getLogger("requests").setLevel(logging.WARNING) def join_with_AND(selectors) : return " AND ".join( ["%s:%s" % (k,v) for k,v in selectors] ) """ Explanation: Search with facetting and grouping Introduction In search_introduction, we saw how basic selectors can be progressively added to a Solr query to find entries of interest.<br> Now we will see how facetting, grouping and pivoting can be used to find interesting facts about your favorite protein. Getting started Let us setup logger and create mysolr instance for the Solr core. End of explanation """ def molecule_name_facet_search(selectors) : response = solr.search(**{ "rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id", "q" : join_with_AND(selectors), "facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1, "facet.field" : "molecule_name", }) num_mols = len(response.documents) mol_name_counts = response.facets['facet_fields']['molecule_name'] logging.info("%d molecules found with %d distinct molecule_names." % (num_mols, len(mol_name_counts.keys()))) for mol_name, nmol in mol_name_counts.items() : logging.info("%3d molecules are named as %s" % (nmol, mol_name)) """ Explanation: Find your protein Identifying previous instances of your protein in the PDB is not an easy task because molecule names given by depositors can differ slightly. The SIFTS project assigns UniProt cross-references to proteins in PDB entries and names them consistently. The following function searches and facets on UniProt name to find proteins of our interest. Note how we are using facet options to identify all distinct values of molecule_name. End of explanation """ molecule_name_facet_search([ ( 'molecule_name' , '/.*[Cc]arbonic.*[aA]nhydrase.*/'), ]) """ Explanation: Let us assume we are interested in carbonic anhydrases. We write the protein name as a regular expression allowing for case changes on start of word. End of explanation """ selectors = [ ( 'molecule_name' , '/.*[Cc]arbonic.*[aA]nhydrase.*/'), ('NOT molecule_name' , '(/.*Putative.*/ OR /.*Inhibitor.*/)'), ] molecule_name_facet_search(selectors) """ Explanation: Note that there are some unintended hits - one putative and another inhibitor. Let us filter those out. End of explanation """ response = solr.search(**{ "rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id", "q" : join_with_AND(selectors), "facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1, "facet.field" : "experimental_method", "group" : "true", "group.facet" : "true", "group.field" : "pdb_id", }) expt_counts = response.facets['facet_fields']['experimental_method'] logging.info("There are %d experimental methods with this protein's structure has been studied." % len(expt_counts)) for expt, count in expt_counts.items() : logging.info("%s : %d" % (expt,count)) """ Explanation: We can also sharpen our search considerably by using annotations like GO, SCOP etc. But the filters should strike a balance in removing spurious hits and keeping genuine ones. Often optimal filters are found through multiple trials. 
Count entries by experiment type Now let us see a summary of experiment types that have been used to solve carbonic anhydrases. Since experiment is a property entry, and not molecules within it, we need to group on pdb_id and facet in a group-sensitive way so that the counts we get are for entries. End of explanation """ response = solr.search(**{ "rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id", "q" : join_with_AND(selectors), "facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1, "facet.field" : "deposition_year", "group" : "true", "group.facet" : "true", "group.field" : "pdb_id", }) year_counts = response.facets['facet_fields']['deposition_year'] logging.info("There are %d years in which this protein's structure has been studied." % len(year_counts)) for year in sorted(year_counts.keys(), key=lambda x : int(x)) : logging.info("%s : %d" % (year,year_counts[year])) """ Explanation: Count entries by year of deposition Let us now facet on year of deposition and see the years in which an entry was deposited for carbonic anhydrases. End of explanation """ response = solr.search(**{ "rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id", "q" : join_with_AND(selectors), "facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1, "facet.field" : "resolution", "facet.range" : "resolution", "f.resolution.facet.range.start" : "0.0", "f.resolution.facet.range.end" : "100", "f.resolution.facet.range.gap" : "0.5", "f.resolution.facet.range.other" : "between", "f.resolution.facet.range.include" : "upper", "group" : "true", "group.facet" : "true", "group.field" : "pdb_id", }) import string, collections resol_counts = response.facets['facet_ranges']['resolution']['counts'] resol_counts = collections.OrderedDict([(resol_counts[rci], resol_counts[rci+1]) for rci in range(0, len(resol_counts), 2)]) logging.info("Resolutions at which this protein has been solved is as follows:") for resol in sorted(resol_counts.keys(), key=lambda x : string.atof(x)) : logging.info("%3d entries in resolution bin starting %s" % (resol_counts[resol], resol)) """ Explanation: Note that we do not have to facet on one field at a time - we could have facetted on multiple fields individually in the same call - just provide comma-separated fields list. Range-based facets Facets can be defined to be range based, e.g. this is useful for fields like resolution, year, length of crystallographic cell, etc. 
End of explanation """ response = solr.search(**{ "rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id, deposition_year, resolution", "q" : join_with_AND(selectors), }) resbin_width = 0.5 def resol_bin(resol) : import decimal return decimal.Decimal(int(resol/resbin_width) * resbin_width) yearbin_width = 5 def depyear_bin(year) : return (year / yearbin_width) * yearbin_width entry_counted = set() counts = collections.defaultdict( lambda : collections.defaultdict( lambda: 0 ) ) for adoc in response.documents : if adoc['pdb_id'] not in entry_counted : res_bin = resol_bin(adoc['resolution']) year_bin = depyear_bin(adoc['deposition_year']) counts[year_bin][res_bin] += 1 import itertools year_bins = sorted(counts.keys()) resol_bins = sorted(set( itertools.chain(*[v.keys() for v in counts.values()]) )) logging.info(" " + " ".join("%.1f-%.1f" % (rb,float(rb)+resbin_width) for rb in resol_bins)) for year in year_bins : to_print = ["%d-%d" % (year,year+yearbin_width)] total = 0 for resol in resol_bins : total += counts.get(year, {}).get(resol, 0) for resol in resol_bins : count = counts.get(year, {}).get(resol, 0) to_print.append(count) #, #int(count*100./total), logging.info(to_print[0] + " ".join(["%5d" % tp for tp in to_print[1:]])) """ Explanation: Hierarchical facetting Factes can be used hierarchically too, e.g. facet first on resolution, then on year, etc. Unfortunately mysolr does not support this feature, but the good news is that you can write simple python on documents returned and achieve the same effect. e.g. let us see how to find distribution of resolution vs deposition year in this set of entries. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/bcc/cmip6/models/bcc-esm1/seaice.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'bcc', 'bcc-esm1', 'seaice') """ Explanation: ES-DOC CMIP6 Model Properties - Seaice MIP Era: CMIP6 Institute: BCC Source ID: BCC-ESM1 Topic: Seaice Sub-Topics: Dynamics, Thermodynamics, Radiative Processes. Properties: 80 (63 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:39 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Model 2. Key Properties --&gt; Variables 3. Key Properties --&gt; Seawater Properties 4. Key Properties --&gt; Resolution 5. Key Properties --&gt; Tuning Applied 6. Key Properties --&gt; Key Parameter Values 7. Key Properties --&gt; Assumptions 8. Key Properties --&gt; Conservation 9. Grid --&gt; Discretisation --&gt; Horizontal 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Seaice Categories 12. Grid --&gt; Snow On Seaice 13. Dynamics 14. Thermodynamics --&gt; Energy 15. Thermodynamics --&gt; Mass 16. Thermodynamics --&gt; Salt 17. Thermodynamics --&gt; Salt --&gt; Mass Transport 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics 19. Thermodynamics --&gt; Ice Thickness Distribution 20. Thermodynamics --&gt; Ice Floe Size Distribution 21. Thermodynamics --&gt; Melt Ponds 22. Thermodynamics --&gt; Snow Processes 23. Radiative Processes 1. Key Properties --&gt; Model Name of seaice model used. 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of sea ice model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.model.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.variables.prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea ice temperature" # "Sea ice concentration" # "Sea ice thickness" # "Sea ice volume per grid cell area" # "Sea ice u-velocity" # "Sea ice v-velocity" # "Sea ice enthalpy" # "Internal ice stress" # "Salinity" # "Snow temperature" # "Snow depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Variables List of prognostic variable in the sea ice model. 2.1. 
Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the sea ice component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS-10" # "Constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Seawater Properties Properties of seawater relevant to sea ice 3.1. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Ocean Freezing Point Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant seawater freezing point, specify this value. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Resolution Resolution of the sea ice grid 4.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning applied to sea ice model component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Target Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Simulations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Which simulations had tuning applied, e.g. all, not historical, only pi-control? * End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Metrics Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any observed metrics used in tuning model/parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.5. Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Which variables were changed during the tuning process? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ice strength (P*) in units of N m{-2}" # "Snow conductivity (ks) in units of W m{-1} K{-1} " # "Minimum thickness of ice created in leads (h0) in units of m" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Key Parameter Values Values of key parameters 6.1. Typical Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N What values were specificed for the following parameters if used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Additional Parameters Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If you have any additional paramterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.description') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Assumptions Assumptions made in the sea ice model 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General overview description of any key assumptions made in this model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. 
On Diagnostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Missing Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the sea ice component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Provide a general description of conservation methodology. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.properties') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Mass" # "Salt" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Properties Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in sea ice by the numerical schemes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.4. Was Flux Correction Used Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does conservation involved flux correction? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. Corrected Conserved Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List any variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Ocean grid" # "Atmosphere Grid" # "Own Grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Grid --&gt; Discretisation --&gt; Horizontal Sea ice discretisation in the horizontal 9.1. 
Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Grid on which sea ice is horizontal discretised? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Structured grid" # "Unstructured grid" # "Adaptive grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.2. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the type of sea ice grid? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite differences" # "Finite elements" # "Finite volumes" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the advection scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.4. Thermodynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model thermodynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 9.5. Dynamics Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the time step in the sea ice model dynamic component in seconds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional horizontal discretisation details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Zero-layer" # "Two-layers" # "Multi-layers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Sea ice vertical properties 10.1. Layering Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.2. Number Of Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using multi-layers specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional vertical grid details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Seaice Categories What method is used to represent sea ice categories ? 11.1. Has Mulitple Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Set to true if the sea ice model has multiple sea ice categories. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Number Of Categories Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify how many. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Category Limits Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 If using sea ice categories specify each of the category limits. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Ice Thickness Distribution Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the sea ice thickness distribution scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.seaice_categories.other') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Other Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the sea ice model does not use sea ice categories specify any additional details. For example models that paramterise the ice thickness distribution ITD (i.e there is no explicit ITD) but there is assumed distribution and fluxes are computed accordingly. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Grid --&gt; Snow On Seaice Snow on sea ice details 12.1. Has Snow On Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow on ice represented in this model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12.2. 
Number Of Snow Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels of snow on ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Snow Fraction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the snow fraction on sea ice is determined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.4. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify any additional details related to snow on ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.horizontal_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamics Sea Ice Dynamics 13.1. Horizontal Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of horizontal advection of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Incremental Re-mapping" # "Prather" # "Eulerian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Transport In Thickness Space Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice transport in thickness space (i.e. in thickness categories)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Hibler 1979" # "Rothrock 1975" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Ice Strength Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Which method of sea ice strength formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.redistribution') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Rafting" # "Ridging" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which processes can redistribute sea ice (including thickness)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.dynamics.rheology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Free-drift" # "Mohr-Coloumb" # "Visco-plastic" # "Elastic-visco-plastic" # "Elastic-anisotropic-plastic" # "Granular" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Rheology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Rheology, what is the ice deformation formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice latent heat (Semtner 0-layer)" # "Pure ice latent and sensible heat" # "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)" # "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Thermodynamics --&gt; Energy Processes related to energy in sea ice thermodynamics 14.1. Enthalpy Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the energy formulation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pure ice" # "Saline ice" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Thermal Conductivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of thermal conductivity is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Conduction fluxes" # "Conduction and radiation heat fluxes" # "Conduction, radiation and latent heat transport" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.3. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of heat diffusion? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heat Reservoir" # "Thermal Fixed Salinity" # "Thermal Varying Salinity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.4. Basal Heat Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method by which basal ocean heat flux is handled? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.5. Fixed Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.6. Heat Content Of Precipitation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which the heat content of precipitation is handled. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.7. 
Precipitation Effects On Salinity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Thermodynamics --&gt; Mass Processes related to mass in sea ice thermodynamics 15.1. New Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method by which new sea ice is formed in open water. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Ice Vertical Growth And Melt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs the vertical growth and melt of sea ice. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Floe-size dependent (Bitz et al 2001)" # "Virtual thin ice melting (for single-category)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Ice Lateral Melting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the method of sea ice lateral melting? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.4. Ice Surface Sublimation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method that governs sea ice surface sublimation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.5. Frazil Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of frazil ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16. Thermodynamics --&gt; Salt Processes related to salt in sea ice thermodynamics. 16.1. Has Multiple Sea Ice Salinities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 16.2. 
Sea Ice Salinity Thermal Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does sea ice salinity impact the thermal properties of sea ice? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Thermodynamics --&gt; Salt --&gt; Mass Transport Mass transport of salt 17.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the mass transport of salt calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Prescribed salinity profile" # "Prognostic salinity profile" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Thermodynamics --&gt; Salt --&gt; Thermodynamics Salt thermodynamics 18.1. Salinity Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is salinity determined in the thermodynamic calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.2. Constant Salinity Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If using a constant salinity value specify this value in PSU? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.3. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the salinity profile used. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Virtual (enhancement of thermal conductivity, thin ice melting)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Thermodynamics --&gt; Ice Thickness Distribution Ice thickness distribution details. 19.1. 
Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice thickness distribution represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Parameterised" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Thermodynamics --&gt; Ice Floe Size Distribution Ice floe-size distribution details. 20.1. Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How is the sea ice floe-size represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Please provide further details on any parameterisation of floe-size. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 21. Thermodynamics --&gt; Melt Ponds Characteristics of melt ponds. 21.1. Are Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are melt ponds included in the sea ice model? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flocco and Feltham (2010)" # "Level-ice melt ponds" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.2. Formulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What method of melt pond formulation is used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Albedo" # "Freshwater" # "Heat" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21.3. Impacts Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N What do melt ponds have an impact on? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22. Thermodynamics --&gt; Snow Processes Thermodynamic processes in snow on sea ice 22.1. Has Snow Aging Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has a snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Snow Aging Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow aging scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.3. Has Snow Ice Formation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Set to True if the sea ice model has snow ice formation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.4. Snow Ice Formation Scheme Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow ice formation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.5. Redistribution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the impact of ridging on snow cover? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Single-layered heat diffusion" # "Multi-layered heat diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.6. Heat Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What is the heat diffusion through snow methodology in sea ice thermodynamics? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Parameterized" # "Multi-band albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Processes Sea Ice Radiative Processes 23.1. Surface Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used to handle surface albedo. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Delta-Eddington" # "Exponential attenuation" # "Ice radiation transmission per category" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Ice Radiation Transmission Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method by which solar radiation through sea ice is handled. End of explanation """
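"""
Explanation: (Added illustration, not part of the generated template) Every property cell above follows the same pattern: a generated DOC.set_id(...) call, followed by one or more DOC.set_value(...) calls that the document authors complete. As a purely hypothetical sketch - the value below is a placeholder taken from the listed valid choices, not a statement about the actual BCC-ESM1 configuration - a completed ENUM cell would look something like this.
End of explanation
"""

# Hypothetical example only -- the chosen value is an illustration, not the
# documented BCC-ESM1 configuration.
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# Supply one of the valid choices listed for this ENUM property (23.1):
DOC.set_value("Delta-Eddington")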
adukic/nd101
autoencoder/Convolutional_Autoencoder_Solution.ipynb
mit
%matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') """ Explanation: Convolutional Autoencoder Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data. End of explanation """ inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x16 conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x8 conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x8 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x8 conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x8 conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x8 conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) """ Explanation: Network Architecture The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below. Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughlt 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data. What's going on with the decoder Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see deconvolutional layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but it reverse. A stride in the input layer results in a larger stride in the deconvolutional layer. 
For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a deconvolutional layer. Deconvolution is often called "transpose convolution" which is what you'll find the TensorFlow API, with tf.nn.conv2d_transpose. However, deconvolutional layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling. Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor. End of explanation """ sess = tf.Session() epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) imgs = batch[0].reshape((-1, 28, 28, 1)) batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) sess.close() """ Explanation: Training As before, here wi'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays. 
End of explanation """ inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x32 conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x32 conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x16 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x16 conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x16 conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x32 conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) sess = tf.Session() epochs = 100 batch_size = 200 # Set's how much noise we're adding to the MNIST images noise_factor = 0.5 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images from the batch imgs = batch[0].reshape((-1, 28, 28, 1)) # Add random noise to the input images noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape) # Clip the images to be between 0 and 1 noisy_imgs = np.clip(noisy_imgs, 0., 1.) # Noisy images as inputs, original images as targets batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) """ Explanation: Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images. Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before. Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers. 
End of explanation
"""

fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
# Add noise to the test images, then pass them through the trained denoising autoencoder
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})

for images, row in zip([noisy_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

fig.tight_layout(pad=0.1)
"""
Explanation: Checking out the performance
Here I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly good job of removing the noise, even though it's sometimes difficult to tell what the original number is.
End of explanation
"""
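"""
Explanation: (Added note) As an optional, rough check - a sketch that assumes in_imgs, noisy_imgs and reconstructed from the cell above are still in scope - the denoising can also be quantified numerically instead of only being judged by eye, for example with a simple mean squared error before and after the autoencoder.
End of explanation
"""

# Optional sketch: compare reconstruction error before and after denoising.
# Assumes in_imgs (10, 784), noisy_imgs (10, 784) and reconstructed
# (10, 28, 28, 1) from the cell above are still available.
flat_recon = reconstructed.reshape((10, 28 * 28))
mse_noisy = np.mean((noisy_imgs - in_imgs) ** 2)      # error of the noisy inputs
mse_denoised = np.mean((flat_recon - in_imgs) ** 2)   # error after denoising
print("MSE noisy vs original:    {:.4f}".format(mse_noisy))
print("MSE denoised vs original: {:.4f}".format(mse_denoised))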
dkillick/courses
course_content/notebooks/numpy_intro.ipynb
gpl-3.0
# numpy is generally imported as 'np' import numpy as np print(np) print(np.__version__) """ Explanation: A Workshop Introduction to NumPy The Python language is an excellent tool for general-purpose programming, with a highly readable syntax, rich and powerful data types (strings, lists, sets, dictionaries, arbitrary length integers, etc) and a very comprehensive standard library. It was not, however, designed specifically for mathematical and scientific computing. Neither the language nor its standard library have facilities for the efficient representation of multidimensional datasets, tools for linear algebra and general matrix manipulations (essential building blocks of virtually all scientific computing). For example, Python lists are very flexible containers that can be nested arbitrarily deep and can hold any Python object in them, but they are poorly suited to represent common mathematical constructs like vectors and matrices. It is for this reason that NumPy exists. Workshop Objectives The aim of this workshop is to enable you to: manipulate numerical arrays perform efficient array calculations Table of Contents Getting Started with Numpy Motivation: what are arrays good for? The Array Object Application: Arithmetic and Broadcasting Application: Statistics Application: Efficiency Further Reading Style This workshop is a hands on coding workshop with discussion sections. It can be treated as an exercise for an individual, but the intent is to work as a group with an instructor. Topics are introduced, followed by worked examples and exploration suggestions. These should be attempted individually or in small groups (making sure everyone is keeping their own notes). Each topic concludes with a review and discussion session for the whole group. The Jupyter notebook provides each participant with an environment to run code and make notes. We recommend that you take your own copy of the workshop notebook and customise it through the workshop. This should provide you with a useful resource to refer back to. Getting Started with NumPy <a class="anchor" id="getting-started"></a> NumPy is the fundamental package for scientific computing with Python. Its primary purpose is to provide a powerful N-dimensional array object; the focus for this workshop. To begin with let's import NumPy, check where it is being imported from and check the version. End of explanation """ # an explicit list of numbers anarray = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23]) # an array of zeros of shape(3, 4) zeroarray = np.zeros((3, 4)) # a range from 0 to n-1 rangearray = np.arange(12) # a range from 0 to n-1, reshaped to (2, 3, 5) shapedarray = np.arange(30).reshape(2, 3, 5) """ Explanation: Documentation Here is a link to the NumPy documenetation for v1.11: https://docs.scipy.org/doc/numpy-1.11.0/reference/ There are many online forums with tips and suggestions for solving problems with NumPy, such as http://stackoverflow.com/ Create an Array There are many ways to create a numpy array, such as: End of explanation """ arr = np.ones((3, 2, 4)) print("Array shape:", arr.shape) print("Array element dtype:", arr.dtype) """ Explanation: Experiment Use the code cells below to experiment with these arrays. What can you do with them? Where could you look for clues? What can you find out about these arrays? Motivation: what are arrays good for? 
<a class="anchor" id="motivation"></a> Extensive Features NumPy provides routines for fast operations on arrays, including mathematical, logical, shape manipulation, sorting, selecting, I/O, discrete Fourier transforms, basic linear algebra, basic statistical operations, random simulation and much more. Fast Calculations It is a lot faster than Python alone for numerical computing tasks. Element-by-element operations are the “default mode” when an ndarray is involved, but the element-by-element operation is speedily executed by pre-compiled C code. Clear Syntax In NumPy c = a * b calculates the element-wise product of a and b, at near-C speeds, but with the code simplicity we expect from something based on Python. This demonstrates a core feature of NumPy: 'vectorization'. Loops iterating through elements of arrays are not required, which can make code easier to read, as well as performing fast calculations. Interfacing to other Libraries Many scientific Python libraries use NumPy as their core array representation. From plotting libraries such as Matplotlib, to parallel processing libraries such as Dask, to data interoperability libraries such as Iris, NumPy arrays are at the core of how these libraries operate and communicate. The Array Object <a class="anchor" id="array-object"></a> The multidimensional array object is the core of NumPy's power. Let us explore this object some more. Array properties Let's create a NumPy array and take a look at some of its properties. End of explanation """ arr = np.array([1, 2, 3, 4, 5, 6]) print("arr --", arr) print("arr[2] --", arr[2]) print("arr[2:5] --", arr[2:5]) print("arr[::2] --", arr[::2]) """ Explanation: Exercise See if you can also find out the array's: number of dimensions, number of elements, and amount of memory used. Hint: you can use help(object) to look up the documentation on any object. Make some more arrays. Some different ways to create arrays can be found at https://docs.scipy.org/doc/numpy/user/basics.creation.html. What are the properties of your arrays? What else can you find out about these arrays? Indexing You can index NumPy arrays in the same way as other Python objects, by using square brackets []. This means we can index to retrieve a single element, multiple consecutive elements, or a more complex sequence: End of explanation """ lst_2d = [[1, 2, 3], [4, 5, 6]] arr_2d = np.array(lst_2d) print("2D list:") print(lst_2d) print('') print("2D array:") print(arr_2d) print('') print("Single array element:") print(arr_2d[1, 2]) print('') print("Single row:") print(arr_2d[1]) print('') print("First two columns:") print(arr_2d[:, :2]) """ Explanation: You can also index multidimensional arrays in a logical way using an enhanced indexing syntax. Remember that Python uses zero-based indexing! End of explanation """ print(arr_2d[arr_2d % 2 == 0]) """ Explanation: Numpy provides syntax to index conditionally, based on the data in the array. You can pass in an array of True and False values (a boolean array), or, more commonly, a condition that returns a boolean array. End of explanation """ print(lst_2d[0:2][1]) print(arr_2d[0:2, 1]) """ Explanation: Exercise Part 1 Why do these indexing examples give the stated results? 
result of arr_2d[1, 0] is 4 result of arr_2d[0] is [1, 2, 3] result of arr_2d[1, 1:] is [5, 6] result of arr_2d[0:, ::2] is [[1, 3], [4, 6]] Part 2 How would you index arr_2d to retrieve: the third value: resulting in 3 the second row: resulting in [4, 5, 6] the first column: resulting in [1, 4] the first column, retaining the outside dimension: resulting in [[1, 4]] Arrays are not lists Question: why do the following examples produce different results? End of explanation """ arr1 = np.arange(4) arr2 = np.arange(4) print('{} + {} = {}'.format(arr1, arr2, arr1 + arr2)) """ Explanation: The result we just received points to an important piece of learning, which is that in most cases NumPy arrays behave very differently to Python lists. Let's explore the differences (and some similarities) between the two. dtype A NumPy array has a fixed data type, called dtype. This is the type of all the elements of the array. This is in contrast to Python lists, which can hold elements of different types. Exercise What happens in Python when you add an integer to a float? What happens when you put an integer into a NumPy float array? What happens when you do numerical calculations between arrays of different types? The Array Object: Summary of key points properties : shape, dtype arrays are homogeneous, all elements have the same type: dtype creation : array([list]), ones, zeros, arange, linspace: indexing arrays to produce further arrays: views on the original arrays multi-dimensional indexing and conditional indexing Application: Arithmetic and Broadcasting<a class="anchor" id="app-calc"></a> Elementwise Arithmetic <a class="anchor" id="arithmetic_and_broadcasting"></a> You can use NumPy to perform arithmetic operations between two arrays in an element-by-element fashion. End of explanation """ arr = np.arange(4) const = 5 print("Original array: {}".format(arr)) print("") print("Array + const: {}".format(arr + const)) """ Explanation: Exercise Define some arrays and compute some results for different operators. Put the operations in different cells and see what works and what doesn't. It makes intrinsic sense that you should be able to add a constant to all values in an array: End of explanation """ daily_records = np.array([[12, 14, 11], [11, 12, 15]]) print('raw data:') print(daily_records) """ Explanation: Broadcasting There are times when you need to perform calculations between NumPy arrays of different sizes. For example, suppose you have maximum temperatures from each of three recording stations, recorded on two separate days. End of explanation """ offset = np.array([2, 1, 4]) corrected_records = daily_records - offset print('corrected values:') print(corrected_records) """ Explanation: Each station is known to overstate the maximum recorded temperature by a different known constant value. You wish to subtract the appropriate offset from each station's values. You can do that like this: End of explanation """ arr1 = np.ones((2, 3)) arr2 = np.ones((2, 1)) # (arr1 + arr2).shape arr1 = np.ones((2, 3)) arr2 = np.ones(3) # (arr1 + arr2).shape arr1 = np.ones((1, 3)) arr2 = np.ones((2, 1)) # (arr1 + arr2).shape arr1 = np.ones((1, 3)) arr2 = np.ones((1, 2)) # (arr1 + arr2).shape """ Explanation: NumPy allows you to do this easily using a powerful piece of functionality called broadcasting. Broadcasting is a way of treating the arrays as if they had the same dimensions, and thus have elements all corresponding. It is then easy to perform the calculation, element-wise. 
It does this by matching dimensions in one array to the other where possible, and using repeated values where there is no corresponding dimension in the other array. Rules of Broadcasting Broadcasting applies these three rules: If the two arrays differ in their number of dimensions, the shape of the array with fewer dimensions is padded with ones on its leading (left) side. If the shape of the two arrays does not match in any dimension, either array with shape equal to 1 in a given dimension is stretched to match the other shape. If in any dimension the sizes disagree and neither has shape equal to 1, an error is raised. Note that all of this happens without ever actually creating the expanded arrays in memory! This broadcasting behavior is in practice enormously powerful, especially given that when NumPy broadcasts to create new dimensions or to 'stretch' existing ones, it doesn't actually duplicate the data. In the example above the operation is carried out as if the scalar 1.5 was a 1D array with 1.5 in all of its entries, but no actual array is ever created. This can save lots of memory in cases when the arrays in question are large. As such this can have significant performance implications. (image source) Exercise What will be the result of adding arr1 to arr2 in the following cases, the shape of the resulting array? End of explanation """ days_adjust = np.array([1.5, 3.7]) adjusted = daily_records - days_adjust """ Explanation: Reshaping arrays to aid broadcasting NumPy allows you to change the shape of an array, so long as the total number of elements in the array does not change. For example, we could reshape a flat array with 12 elements to a 2D array with shape (2, 6), or (3, 2, 2), or even (3, 4, 1). We could not, however, reshape it to have shape (2, 5), because the total number of elements would not be kept constant. Now suppose you want to apply a correction for each day. For example, you might try : End of explanation """ a = np.arange(12).reshape((3, 4)) mean = np.mean(a) print(a) print(mean) """ Explanation: but that results in a ValueError Clearly, this doesn't work ! Exercise With reference to the above rules of broadcasting: 1. describe why the above attempt to subtract days_adjust fails 1. work out how you can modify the 'days_adjust' array to get the desired result. Hint: imagine how the 'days_adjust' values should look, when expanded to match the dimensions of 'daily_records'. Exercise Sometimes an operation will produce a result, but not the one you wanted. For example, suppose we have : python A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) and we wish to add 0, 100 and 400 to the rows. That is, ```python B = np.array([0, 100, 400]) and... desired_result = np.array([[ 1, 2, 3], [104, 105, 406], [407, 408, 409]]) ``` Questions: what will be the result of simply adding A and B ? how can you perform the correct calculation on A and B to get desired_result ? Now go and experiment with this behaviour! You could review some of your experiments that failed from the earlier "Elementwise Arithmetic" section and see if you can now make them work. Arithmetic and Broadcasting: Summary of key points arithmetic operations are performed in an element-by-element fashion, operations can be performed between arrays of different shapes, the arrays' dimensions are aligned according to fixed rules; where one input lacks a given dimension, values are repeated, reshaping can be used to get arrays to combine as required. 
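As a quick illustration of the rules above (a short sketch added for clarity, not one of the original exercises; the array names are arbitrary): a shape-(2, 1) column combined with a shape-(3,) row broadcasts to a common (2, 3) shape -- the (3,) array is first treated as (1, 3) by rule 1, and each length-1 dimension is then stretched by rule 2.
```python
col = np.arange(2).reshape(2, 1)   # shape (2, 1)
row = np.arange(3) * 10            # shape (3,), padded to (1, 3)
total = col + row                  # broadcast result has shape (2, 3)
print(total.shape)
print(total)
```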
Statistics <a class="anchor" id="statistics"></a> Numpy arrays support many common statistical calculations. For a list of common operations, see : https://docs.scipy.org/doc/numpy/reference/routines.statistics.html. The simplest operations consist of calculating a single statistical value from an array of numbers -- such as a mean value, a variance or a minimum. For example: End of explanation """ daily_records = np.array([[12, 14, 11], [11, 12, 15]]) masked_data = np.ma.masked_array(daily_records) masked_data[0, 1] = np.ma.masked print('masked data:') print(masked_data) """ Explanation: Exercise What is the mean value over all the values in our daily_records array, given by the np.mean() function ? What other similar statistical operations exist (see above link) ? A mean value can also be calculated with &lt;array&gt;.mean(). Is that the same thing ? How should you calculate a median value -- with np.median(array) or array.median() ? Used without any further arguments, statistical functions simply reduce the whole array to a single value. In practice, however, we very often want to calculate statistics over only some of the dimensions. The most common requirement is to calculate a statistic along a single array dimension, while leaving all the other dimensions intact. This is referred to as "collapsing" or "reducing" the chosen dimension. This is done by adding an "axis" keyword specifying which dimension, such as np.min(data, axis=1). For example, recall that the above "daily_records" data varies over 2 timepoints and 3 locations, mapped to array dimensions 0 and 1 respectively: ``` print daily_records [[12 14 11] [11 12 15]] ``` For this data, by adding the axis keyword, we can calculate either : * (a) axis=0 : a statistic over both days, for each station , or * (b) axis=1 : a statistic over all three stations, for each day. For most statistical functions (but not all), the "axis" keyword will also accept multiple dimensions, e.g. axis=(0, 2). Remember also that the default statistical operation, as seen above, is to collapses over all dimensions, reducing the array to a single value. This is equivalent to coding axis=None. Exercise produce the two kinds of single-axis average mentioned above -- that is: over all days at each station, over all stations for each day. (Hint: look at the documentation -- the term 'axis' in NumPy refers to the number of a dimension.) Create a sample 3-D array, to represent air temperature at given times, X and Y positions : how can you form a mean over all X and Y at each timestep ? what shape does such result have ? how can you calculate the np.ptp statistic of this data in a similar form, i.e. for all X and Y at each timestep ? N.B. ptp means a peak-to-peak range, i.e. "max(data) - min(data)". Masked Arrays Real-world measurements processes often result in certain datapoint values being uncertain or simply "missing". This is usually indicated by additional data quality information, stored alongside the data values. In these cases we often need to make calculations that count only the valid datapoints. Numpy provides a special "masked array" type for this type of calculation. For example, we might know that in our previous daily_records data, the value for station-2 on day-1 is invalid. 
To represent this missing datapoint, we can make a masked version of the data : End of explanation """ print('unmasked average = ', np.mean(daily_records)) print('masked average = ', np.ma.mean(masked_data)) """ Explanation: The statistics of the masked data version are different: End of explanation """ %%timeit x = range(500) """ Explanation: The np.ma.masked_array() function seen above is a simple creation method for masked data. The sub-module np.ma contains all the NumPy masked-data routines. Instead of masking selected points, as above, a mask is often specified as a "mask array": This is a boolean array of the same size and shape as the data, where False means a good datapoint and True means a masked or missing point. Such a 'boolean mask' can be passed in the np.ma.masked_array creation method, and can be extracted with the np.ma.getmaskarray() function. Note that most file storage formats represent missing data in a different way, using a distinct "missing data" value appearing in the data. There is special support for converting between this type of representation and NumPy masked arrays : Every masked array has a 'fill_value' property and a 'filled()' method to convert between the two. Exercise create a masked array from the numbers 0-11, but where all the values less than 5 are masked. how can you create masked data from an array of positive values where a value of -1.0 represents 'missing' ? what special masked array creation routines exist to do this kind of thing more efficiently ? HINT: look up np.ma.masked_where and related routines. use np.ma.filled() to create a 'plain' (i.e. unmasked) array from a masked one how can you create a plain array from a masked one, but using a different fill-value for masked points ? HINT: there are two easy ways what happens to the 'fill_value' properties when you calculate new masked arrays from old ones ? Statistics and Masked Arrays: Summary of key points most statistical functions are available in two different forms, as in array.mean() and also np.mean(array), the choice being mostly a question of style. statistical operations operate over, and remove (or "collapse") the array dimensions that they are applied to. an "axis" keyword specifies operation over dimensions : this can be one; multiple; or all. NOTE: not all operations permit operation over specifically selected dimensions Statistical operations are not really part of Numpy itself, but are defined by the higher-level Scipy project. Missing datapoints can be represented using "masked arrays" these are useful for calculation, but usually require converting to another form for data storage Efficiency <a class="anchor" id="efficiency"></a> Loops and Vectorised Operations We will now explore calculation performance and consider efficiency in terms of processing time. Firstly let's look at a simple processing time tool that is provided in notebooks; %%timeit : End of explanation """ %%timeit -n 100 -r 5 x = range(500) """ Explanation: Repeat that, specifying only 100 loops and fastest time of 5 runs End of explanation """ rands = np.random.random(1000000).reshape(100, 100, 100) %%timeit -n 10 -r 5 overPointEightLoop = 0 for i in range(100): for j in range(100): for k in range(100): if rands[i, j, k] > 0.8: overPointEightLoop +=1 %%timeit -n 10 -r 5 overPointEightWhere = rands[rands > 0.8].size """ Explanation: This gives us an easy way to evaluate performance for implementations ... 
End of explanation """ try: a = np.ones((11, 13, 17, 23, 29, 37, 47)) except MemoryError: print('this would have been a memory error') """ Explanation: Clearly this is a trivial example, so let us explore a more complicated case. Exercise: trapezoidal integration In this exercise, you are tasked with implementing the simple trapezoid rule formula for numerical integration. If we want to compute the definite integral $$ \int_{a}^{b}f(x)dx $$ we can partition the integration interval $[a,b]$ into smaller subintervals. We then approximate the area under the curve for each subinterval by calculating the area of the trapezoid created by linearly interpolating between the two function values at each end of the subinterval: For a pre-computed $y$ array (where $y = f(x)$ at discrete samples) the trapezoidal rule equation is: $$ \int_{a}^{b}f(x)dx\approx\frac{1}{2}\sum_{i=1}^{n}\left(x_{i}-x_{i-1}\right)\left(y_{i}+y_{i-1}\right). $$ In pure python, this can be written as: def trapz_slow(x, y): area = 0. for i in range(1, len(x)): area += (x[i] - x[i-1]) * (y[i] + y[i-1]) return area / 2 Part 1 Create two arrays $x$ and $y$, where $x$ is a linearly spaced array in the interval $[0, 3]$ of length 11, and $y$ represents the function $f(x) = x^2$ sampled at $x$. Part 2 Use indexing (not a for loop) to find the 10 values representing $y_{i}+y_{i-1}$ for $i$ between 1 and 11. Hint: What indexing would be needed to get all but the last element of the 1d array y. Similarly what indexing would be needed to get all but the first element of a 1d array. Part 3 Write a function trapz(x, y), that applies the trapezoid formula to pre-computed values, where x and y are 1-d arrays. The function should not use a for loop. Part 4 Verify that your function is correct by using the arrays created in #1 as input to trapz. Your answer should be a close approximation of $\int_0^3 x^2$ which is $9$. Part 5 (extension) numpy and scipy.integrate provide many common integration schemes. Find the documentation for NumPy's own version of the trapezoidal integration scheme and check its result with your own: Part 6 (extension) Write a function trapzf(f, a, b, npts=100) that accepts a function f, the endpoints a and b and the number of samples to take npts. Sample the function uniformly at these points and return the value of the integral. Use the trapzf function to identify the minimum number of sampling points needed to approximate the integral $\int_0^3 x^2$ with an absolute error of $<=0.0001$. (A loop is necessary here.) Memory Errors NumPy can only work with the system memory. If too large an array is realised, a memory error will result. End of explanation """ arr = np.arange(8) arr_view = arr.reshape(2, 4) # Print the "view" array from reshape. print('Before\n', arr_view) # Update the first element of the original array. arr[0] = 1000 # Print the "view" array from reshape again, # noticing the first value has changed. print('After\n', arr_view) """ Explanation: Views on Arrays NumPy attempts to not make copies of arrays unless it is explicitly told to. Many NumPy operations will produce a reference to an existing array, known as a "view", instead of making a whole new array. For example: Indexing and reshaping provide a view of the same memory wherever possible. End of explanation """ arr = np.arange(8) arr_view = arr.reshape(2, 4).copy() # Print the "view" array from reshape. print('Before\n', arr_view) # Update the first element of the original array. 
arr[0] = 1000
# Print the "view" array from reshape again, noticing that this time the
# first value has NOT changed, because arr_view was made with .copy() and
# owns its own block of memory.
print('After\n', arr_view)
"""
Explanation: What this means is that if one array (arr) is modified, the other (arr_view) will also be updated: the same memory is being shared. This is a valuable tool which enables the system memory overhead to be managed, which is particularly useful when handling lots of large arrays. The lack of copying allows for very efficient vectorized operations.
Remember, this behaviour is automatic in most of NumPy, so it requires some consideration in your code, as it can lead to bugs that are hard to track down. For example, if you are changing some elements of an array that you are using elsewhere, you may want to explicitly copy that array before making changes.
If in doubt, you can always copy the data to a different block of memory with the copy() method.
For example ...
End of explanation
"""
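# A small extra check (an added sketch, not one of the original workshop cells):
# the .base attribute and np.shares_memory() give a quick way to tell whether
# two arrays are views onto the same data or independent copies.
arr = np.arange(8)
arr_view = arr.reshape(2, 4)         # a view: shares memory with arr
arr_copy = arr.reshape(2, 4).copy()  # an independent copy
print(arr_view.base is arr)              # True - arr_view is a view onto arr
print(np.shares_memory(arr, arr_view))   # True
print(np.shares_memory(arr, arr_copy))   # False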
ClementPhil/deep-learning
first-neural-network/dlnd-your-first-neural-network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. 
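For example (a small added sketch, not part of the original project template): once the trained network later produces output in these scaled units, here called predictions, the saved factors let us convert it back to real ridership counts.
```python
mean, std = scaled_features['cnt']
unscaled = predictions * std + mean
```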
End of explanation """ # Save the last 21 days test_data = data[-21*24:] data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. End of explanation """ # Hold out the last 60 days of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def sigmoid(x): return 1/(1 + np.exp(-x)) def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weidghts self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.input_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, (self.output_nodes, self.hidden_nodes)) self.lr = learning_rate #### Set this to your implemented sigmoid function #### # Activation function is the sigmoid function self.activation_function = lambda x : 1/(1 + np.exp(-x)) #question: as I wrote like this the notebook tells that sigmoid is not defined #self.activation_function= sigmoid #def sigmoid(x): #return 1 / (1 + np.exp(-x)) def train(self, inputs_list, targets_list): # Convert inputs list to 2d array inputs = np.array(inputs_list, ndmin=2).T targets = np.array(targets_list, ndmin=2).T #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) hidden_outputs = self.activation_function(hidden_inputs) # TODO: Output layer final_inputs =np.dot(self.weights_hidden_to_output, hidden_outputs) final_outputs = final_inputs #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual # TODO: Backpropagated error hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer hidden_grad = hidden_outputs * (1 - hidden_outputs) # hidden layer gradients # TODO: Update the weights self.weights_hidden_to_output += self.lr * np.dot(output_errors , hidden_outputs.T) # update hidden-to-output weights with gradient descent step #self.weights_input_to_hidden +=self.lr * np.dot(hidden_errors.T, inputs.T) *hidden_grad# update input-to-hidden weights with gradient descent step self.weights_input_to_hidden += self.lr * np.dot(hidden_errors, inputs.T) * hidden_grad #Question: # I've tried many times to figure out the * and the np.dot, finally I turned for forum's help and use np.dot instead. # and with .T or not the final unit test may not pass, that is confusing sometimes. 
def run(self, inputs_list): # Run a forward pass through the network inputs = np.array(inputs_list, ndmin=2).T #### Implement the forward pass here #### # TODO: Hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)# signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer # TODO: Output layer final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)# signals into final output layer final_outputs = final_inputs# signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. End of explanation """ import sys ### Set the hyperparameters here ### epochs = 1500 learning_rate = 0.2 hidden_nodes = 10 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for e in range(epochs): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) for record, target in zip(train_features.ix[batch].values, train_targets.ix[batch]['cnt']): network.train(record, target) # Printing out the training progress train_loss = MSE(network.run(train_features), train_targets['cnt'].values) val_loss = MSE(network.run(val_features), val_targets['cnt'].values) sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... 
Validation loss: " + str(val_loss)[:5]) losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() plt.ylim(ymax=0.5) """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of epochs This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features)*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. 
End of explanation """ import unittest inputs = [0.5, -0.2, 0.1] targets = [0.4] test_w_i_h = np.array([[0.1, 0.4, -0.3], [-0.2, 0.5, 0.2]]) test_w_h_o = np.array([[0.3, -0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328, -0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, 0.39775194, -0.29887597], [-0.20185996, 0.50074398, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Thinking about your results Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does? Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter Your answer below Unit tests Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project. End of explanation """
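# A possible answer to the question left in the comments of the NeuralNetwork
# class above ("the notebook tells that sigmoid is not defined"): a bare
# `def sigmoid(x):` inside the class body becomes an instance method, and the
# name `sigmoid` is not visible from inside __init__, so
# `self.activation_function = sigmoid` raises a NameError there.
# One way around it (a sketch, not part of the graded submission) is to define
# sigmoid at module level (or as a @staticmethod) and assign that instead of
# the lambda:
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# inside NeuralNetwork.__init__ this could then simply be:
#     self.activation_function = sigmoid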
ESSS/notebooks
interpolation_to_a_structured_grid_from_a_cloud_of_points.ipynb
mit
# Imports
import math
import seaborn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
"""
Explanation: Interpolation to a structured grid from a cloud of points
First of all, we have a cloud of XYZ points and we want to predict the value at each vertex of a 2D structured grid based on this cloud of points (I will abbreviate that as CP and the structured grid as SG). It's important to note that the CP has 3 values which describe each point:
* the X value, which represents the coordinate along the X axis;
* the Y value, which represents the coordinate along the Y axis;
* the Z value, which describes a property value (like temperature, depth, pressure, etc).
So, I'm going to use the KNN (K-Nearest Neighbors) strategy to search for the nearest points of each vertex of the SG and predict the property value of that vertex. However, if the nearest point of a vertex is too far away, I will assume that vertex has to be blank (or, in this case, I will just put a zero value). I'm going to explain in the next sections how to apply this interpolation.
Obs: For more information about the KNN algorithm please visit:
* http://scikit-learn.org/stable/modules/neighbors.html
* https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
Imports
End of explanation
"""
# Loading xyz map
correct_map = pd.read_csv('correct_map.xyz', sep=' ', dtype='d', header=None, names=['x', 'y', 'z'])
scattered_data_10000 = pd.read_csv('scattered_data_10000.xyz', sep=' ', dtype='d', header=None, names=['x', 'y', 'z'])
NI = 100
NJ = 100
number_neighbors = 5
# Creating grid points
x_grid = np.linspace(1, 10, NI)
y_grid = np.linspace(1, 10, NJ)
grid_points = pd.DataFrame()
grid_points['x'] = np.tile(x_grid, NJ)
grid_points['y'] = y_grid.repeat(NI)
grid_points['z'] = np.zeros(NI*NJ)
"""
Explanation: Loading the points and creating the structured grid
End of explanation
"""
import matplotlib as mpl
%matplotlib inline
mpl.rcParams['savefig.dpi'] = 250
mpl.rcParams['figure.dpi'] = 250
grid_points.plot(kind='scatter', x='x', y='y', marker='.', s=5)
plt.show()
"""
Explanation: Structured Grid:
End of explanation
"""
scattered_data_10000.plot(kind='scatter', x='x', y='y', marker='.', s=5)
plt.show()
"""
Explanation: Cloud of Points:
End of explanation
"""
# Applying KNN
neighbors = NearestNeighbors(n_neighbors=number_neighbors, algorithm='ball_tree').fit(scattered_data_10000.loc[:, ['x', 'y']])
# Distance and index of each point from each vertex of the grid
distances, indexes = neighbors.kneighbors(grid_points.loc[:, ['x', 'y']])
"""
Explanation: Applying KNN
End of explanation
"""
# Maximum and minimum values in X axis
max_x = grid_points.loc[:, 'x'].max()
min_x = grid_points.loc[:, 'x'].min()
# Maximum and minimum values in Y axis
max_y = grid_points.loc[:, 'y'].max()
min_y = grid_points.loc[:, 'y'].min()
# Step X and Step Y
step_x = (max_x - min_x) / NI
step_y = (max_y - min_y) / NJ
# Radius
radius = 2 * math.sqrt((step_x ** 2) + (step_y ** 2))
"""
Explanation: Calculating the radius within which the nearest point has to be located
| Symbol | Meaning |
|:------:|:-------:|
|$$ S_x $$| Step in X axis|
|$$ x_{max} $$| Maximum value of X in grid|
|$$ x_{min} $$| Minimum value of X in grid|
|$$ N_i $$ | Number of vertices in X axis|
|$$ S_y $$| Step in Y axis|
|$$ y_{max} $$| Maximum value of Y in grid|
|$$ y_{min} $$| Minimum value of Y in grid|
|$$ N_j $$ | Number of vertices in Y axis|
|$$ R $$ | Radius |
|$$ d_{norm} $$| Normalized distance|
|$$ n $$ | Number of neighbors|
|$$ w $$ | One minus the normalized distance, i.e. the weight of each neighbor |
|$$ P $$ | Interpolated (scalar) value at a grid vertex |
|$$ Z $$ | Property value of a cloud point |
Formula to calculate each axis step
$$ S_x = \frac{x_{max} - x_{min}}{N_i} $$
$$ S_y = \frac{y_{max} - y_{min}}{N_j} $$
Formula to calculate the radius, which is the maximum distance within which the first nearest point needs to be located
$$ R = 2\sqrt{S_x^2 + S_y^2} $$
In Python:
End of explanation
"""
less_radius = distances[:, 0] <= radius
distances = distances[less_radius, :]
indexes = indexes[less_radius, :]
"""
Explanation: Selecting the points whose distance is less than or equal to the radius:
End of explanation
"""
# Using the scikit-learn library
weight_norm = 1 - normalize(distances, axis=1)
"""
Explanation: We normalize the distances and subtract each value from 1; that will be the weight of each distance.
Using the l2 normalization, which can be represented by:
$$ d_{norm} = \frac{d}{\sqrt{\sum_{i=1}^{n} d_i^2}} $$
In Python:
End of explanation
"""
prod = weight_norm * scattered_data_10000.values[indexes, 2]
scalars = np.full(NI * NJ, 0.0)
grid_points.loc[less_radius, 'z'] = prod.sum(axis=1) / (weight_norm.sum(axis=1))
"""
Explanation: Formula to calculate the value for each vertex of the structured grid
$$ P = \frac{\sum_{i=1}^{n} (w_{i}\times Z_i)}{\sum_{j=1}^{n} w_{j}} $$
In Python:
End of explanation
"""
plt.pcolor(correct_map.values[:, 0].reshape(NI, NJ), correct_map.values[:, 1].reshape(NI, NJ), correct_map.values[:, 2].reshape(NI, NJ), cmap=cm.jet)
"""
Explanation: Example - Desired map
End of explanation
"""
plt.pcolor(grid_points.values[:, 0].reshape(NI, NJ), grid_points.values[:, 1].reshape(NI, NJ), grid_points.values[:, 2].reshape(NI, NJ), cmap=cm.jet)
"""
Explanation: Map reconstructed using the algorithm described
End of explanation
"""
dif_map = correct_map.z - grid_points.z
dif_map.describe()
error = (grid_points.z / correct_map.z) - 1
plt.hist(error)
error[error < 0] *= -1
error.describe()
"""
Explanation: Error
I'm going to calculate the error of the reconstructed map.
End of explanation
"""
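# A possible refactor of the interpolation steps above into a single reusable
# helper. This is an added sketch: the function name and signature are my own
# choice, but it relies only on the libraries already imported in this notebook.
# cloud_xyz is an (N, 3) array holding x, y and the property value z;
# grid_xy is an (M, 2) array of vertex coordinates; the return value is an
# array of M interpolated values (0 where no cloud point is within the radius).
def interpolate_knn_idw(cloud_xyz, grid_xy, n_neighbors=5, radius=None):
    nn = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree')
    nn.fit(cloud_xyz[:, :2])
    distances, indexes = nn.kneighbors(grid_xy)
    result = np.zeros(len(grid_xy))
    if radius is None:
        close = np.ones(len(grid_xy), dtype=bool)
    else:
        close = distances[:, 0] <= radius
    weights = 1 - normalize(distances[close], axis=1)
    values = cloud_xyz[indexes[close], 2]
    result[close] = (weights * values).sum(axis=1) / weights.sum(axis=1)
    return result

# Example of how it could be used with the data loaded above:
# grid_points['z'] = interpolate_knn_idw(scattered_data_10000.values,
#                                        grid_points[['x', 'y']].values,
#                                        number_neighbors, radius)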
AllenDowney/ThinkStats2
code/chap12ex.ipynb
gpl-3.0
from os.path import basename, exists def download(url): filename = basename(url) if not exists(filename): from urllib.request import urlretrieve local, _ = urlretrieve(url, filename) print("Downloaded " + local) download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py") download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py") import numpy as np import pandas as pd import random import thinkstats2 import thinkplot """ Explanation: Chapter 12 Examples and Exercises from Think Stats, 2nd Edition http://thinkstats2.com Copyright 2016 Allen B. Downey MIT License: https://opensource.org/licenses/MIT End of explanation """ download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/mj-clean.csv") transactions = pd.read_csv("mj-clean.csv", parse_dates=[5]) transactions.head() """ Explanation: Time series analysis NOTE: Some of the example in this chapter have been updated to work with more recent versions of the libraries. Load the data from "Price of Weed". End of explanation """ def GroupByDay(transactions, func=np.mean): """Groups transactions by day and compute the daily mean ppg. transactions: DataFrame of transactions returns: DataFrame of daily prices """ grouped = transactions[["date", "ppg"]].groupby("date") daily = grouped.aggregate(func) daily["date"] = daily.index start = daily.date[0] one_year = np.timedelta64(1, "Y") daily["years"] = (daily.date - start) / one_year return daily """ Explanation: The following function takes a DataFrame of transactions and compute daily averages. End of explanation """ def GroupByQualityAndDay(transactions): """Divides transactions by quality and computes mean daily price. transaction: DataFrame of transactions returns: map from quality to time series of ppg """ groups = transactions.groupby("quality") dailies = {} for name, group in groups: dailies[name] = GroupByDay(group) return dailies """ Explanation: The following function returns a map from quality name to a DataFrame of daily averages. End of explanation """ dailies = GroupByQualityAndDay(transactions) """ Explanation: dailies is the map from quality name to DataFrame. End of explanation """ import matplotlib.pyplot as plt thinkplot.PrePlot(rows=3) for i, (name, daily) in enumerate(dailies.items()): thinkplot.SubPlot(i + 1) title = "Price per gram ($)" if i == 0 else "" thinkplot.Config(ylim=[0, 20], title=title) thinkplot.Scatter(daily.ppg, s=10, label=name) if i == 2: plt.xticks(rotation=30) thinkplot.Config() else: thinkplot.Config(xticks=[]) """ Explanation: The following plots the daily average price for each quality. End of explanation """ import statsmodels.formula.api as smf def RunLinearModel(daily): model = smf.ols("ppg ~ years", data=daily) results = model.fit() return model, results """ Explanation: We can use statsmodels to run a linear model of price as a function of time. End of explanation """ from IPython.display import display for name, daily in dailies.items(): model, results = RunLinearModel(daily) print(name) display(results.summary()) """ Explanation: Here's what the results look like. End of explanation """ def PlotFittedValues(model, results, label=""): """Plots original data and fitted values. model: StatsModel model object results: StatsModel results object """ years = model.exog[:, 1] values = model.endog thinkplot.Scatter(years, values, s=15, label=label) thinkplot.Plot(years, results.fittedvalues, label="model", color="#ff7f00") """ Explanation: Now let's plot the fitted model with the data. 
End of explanation """ def PlotLinearModel(daily, name): """Plots a linear fit to a sequence of prices, and the residuals. daily: DataFrame of daily prices name: string """ model, results = RunLinearModel(daily) PlotFittedValues(model, results, label=name) thinkplot.Config( title="Fitted values", xlabel="Years", xlim=[-0.1, 3.8], ylabel="Price per gram ($)", ) """ Explanation: The following function plots the original data and the fitted curve. End of explanation """ name = "high" daily = dailies[name] PlotLinearModel(daily, name) """ Explanation: Here are results for the high quality category: End of explanation """ array = np.arange(10) """ Explanation: Moving averages As a simple example, I'll show the rolling average of the numbers from 1 to 10. End of explanation """ series = pd.Series(array) series.rolling(3).mean() """ Explanation: With a "window" of size 3, we get the average of the previous 3 elements, or nan when there are fewer than 3. End of explanation """ def PlotRollingMean(daily, name): """Plots rolling mean. daily: DataFrame of daily prices """ dates = pd.date_range(daily.index.min(), daily.index.max()) reindexed = daily.reindex(dates) thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.2, label=name) roll_mean = pd.Series(reindexed.ppg).rolling(30).mean() thinkplot.Plot(roll_mean, label="rolling mean", color="#ff7f00") plt.xticks(rotation=30) thinkplot.Config(ylabel="price per gram ($)") """ Explanation: The following function plots the rolling mean. End of explanation """ PlotRollingMean(daily, name) """ Explanation: Here's what it looks like for the high quality category. End of explanation """ def PlotEWMA(daily, name): """Plots rolling mean. daily: DataFrame of daily prices """ dates = pd.date_range(daily.index.min(), daily.index.max()) reindexed = daily.reindex(dates) thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.2, label=name) roll_mean = reindexed.ppg.ewm(30).mean() thinkplot.Plot(roll_mean, label="EWMA", color="#ff7f00") plt.xticks(rotation=30) thinkplot.Config(ylabel="price per gram ($)") PlotEWMA(daily, name) """ Explanation: The exponentially-weighted moving average gives more weight to more recent points. End of explanation """ def FillMissing(daily, span=30): """Fills missing values with an exponentially weighted moving average. Resulting DataFrame has new columns 'ewma' and 'resid'. daily: DataFrame of daily prices span: window size (sort of) passed to ewma returns: new DataFrame of daily prices """ dates = pd.date_range(daily.index.min(), daily.index.max()) reindexed = daily.reindex(dates) ewma = pd.Series(reindexed.ppg).ewm(span=span).mean() resid = (reindexed.ppg - ewma).dropna() fake_data = ewma + thinkstats2.Resample(resid, len(reindexed)) reindexed.ppg.fillna(fake_data, inplace=True) reindexed["ewma"] = ewma reindexed["resid"] = reindexed.ppg - ewma return reindexed def PlotFilled(daily, name): """Plots the EWMA and filled data. daily: DataFrame of daily prices """ filled = FillMissing(daily, span=30) thinkplot.Scatter(filled.ppg, s=15, alpha=0.2, label=name) thinkplot.Plot(filled.ewma, label="EWMA", color="#ff7f00") plt.xticks(rotation=30) thinkplot.Config(ylabel="Price per gram ($)") """ Explanation: We can use resampling to generate missing values with the right amount of noise. End of explanation """ PlotFilled(daily, name) """ Explanation: Here's what the EWMA model looks like with missing values filled. 
End of explanation """ def SerialCorr(series, lag=1): xs = series[lag:] ys = series.shift(lag)[lag:] corr = thinkstats2.Corr(xs, ys) return corr """ Explanation: Serial correlation The following function computes serial correlation with the given lag. End of explanation """ filled_dailies = {} for name, daily in dailies.items(): filled_dailies[name] = FillMissing(daily, span=30) """ Explanation: Before computing correlations, we'll fill missing values. End of explanation """ for name, filled in filled_dailies.items(): corr = thinkstats2.SerialCorr(filled.ppg, lag=1) print(name, corr) """ Explanation: Here are the serial correlations for raw price data. End of explanation """ for name, filled in filled_dailies.items(): corr = thinkstats2.SerialCorr(filled.resid, lag=1) print(name, corr) """ Explanation: It's not surprising that there are correlations between consecutive days, because there are obvious trends in the data. It is more interested to see whether there are still correlations after we subtract away the trends. End of explanation """ rows = [] for lag in [1, 7, 30, 365]: print(lag, end="\t") for name, filled in filled_dailies.items(): corr = SerialCorr(filled.resid, lag) print("%.2g" % corr, end="\t") print() """ Explanation: Even if the correlations between consecutive days are weak, there might be correlations across intervals of one week, one month, or one year. End of explanation """ # NOTE: acf throws a FutureWarning because we need to replace `unbiased` with `adjusted`, # just as soon as Colab gets updated :) import warnings warnings.simplefilter(action="ignore", category=FutureWarning) import statsmodels.tsa.stattools as smtsa filled = filled_dailies["high"] acf = smtsa.acf(filled.resid, nlags=365, unbiased=True, fft=False) print("%0.2g, %.2g, %0.2g, %0.2g, %0.2g" % (acf[0], acf[1], acf[7], acf[30], acf[365])) """ Explanation: The strongest correlation is a weekly cycle in the medium quality category. Autocorrelation The autocorrelation function is the serial correlation computed for all lags. We can use it to replicate the results from the previous section. End of explanation """ def SimulateAutocorrelation(daily, iters=1001, nlags=40): """Resample residuals, compute autocorrelation, and plot percentiles. daily: DataFrame iters: number of simulations to run nlags: maximum lags to compute autocorrelation """ # run simulations t = [] for _ in range(iters): filled = FillMissing(daily, span=30) resid = thinkstats2.Resample(filled.resid) acf = smtsa.acf(resid, nlags=nlags, unbiased=True, fft=False)[1:] t.append(np.abs(acf)) high = thinkstats2.PercentileRows(t, [97.5])[0] low = -high lags = range(1, nlags + 1) thinkplot.FillBetween(lags, low, high, alpha=0.2, color="gray") """ Explanation: To get a sense of how much autocorrelation we should expect by chance, we can resample the data (which eliminates any actual autocorrelation) and compute the ACF. End of explanation """ def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False): """Plots autocorrelation functions. 
dailies: map from category name to DataFrame of daily prices nlags: number of lags to compute add_weekly: boolean, whether to add a simulated weekly pattern """ thinkplot.PrePlot(3) daily = dailies["high"] SimulateAutocorrelation(daily) for name, daily in dailies.items(): if add_weekly: daily.ppg = AddWeeklySeasonality(daily) filled = FillMissing(daily, span=30) acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True, fft=False) lags = np.arange(len(acf)) thinkplot.Plot(lags[1:], acf[1:], label=name) """ Explanation: The following function plots the actual autocorrelation for lags up to 40 days. The flag add_weekly indicates whether we should add a simulated weekly cycle. End of explanation """ def AddWeeklySeasonality(daily): """Adds a weekly pattern. daily: DataFrame of daily prices returns: new DataFrame of daily prices """ fri_or_sat = (daily.index.dayofweek == 4) | (daily.index.dayofweek == 5) fake = daily.ppg.copy() fake[fri_or_sat] += np.random.uniform(0, 2, fri_or_sat.sum()) return fake """ Explanation: To show what a strong weekly cycle would look like, we have the option of adding a price increase of 1-2 dollars on Friday and Saturdays. End of explanation """ axis = [0, 41, -0.2, 0.2] PlotAutoCorrelation(dailies, add_weekly=False) thinkplot.Config(axis=axis, loc="lower right", ylabel="correlation", xlabel="lag (day)") """ Explanation: Here's what the real ACFs look like. The gray regions indicate the levels we expect by chance. End of explanation """ PlotAutoCorrelation(dailies, add_weekly=True) thinkplot.Config(axis=axis, loc="lower right", xlabel="lag (days)") """ Explanation: Here's what it would look like if there were a weekly cycle. End of explanation """ def GenerateSimplePrediction(results, years): """Generates a simple prediction. results: results object years: sequence of times (in years) to make predictions for returns: sequence of predicted values """ n = len(years) inter = np.ones(n) d = dict(Intercept=inter, years=years, years2=years**2) predict_df = pd.DataFrame(d) predict = results.predict(predict_df) return predict def PlotSimplePrediction(results, years): predict = GenerateSimplePrediction(results, years) thinkplot.Scatter(daily.years, daily.ppg, alpha=0.2, label=name) thinkplot.plot(years, predict, color="#ff7f00") xlim = years[0] - 0.1, years[-1] + 0.1 thinkplot.Config( title="Predictions", xlabel="Years", xlim=xlim, ylabel="Price per gram ($)", loc="upper right", ) """ Explanation: Prediction The simplest way to generate predictions is to use statsmodels to fit a model to the data, then use the predict method from the results. End of explanation """ name = "high" daily = dailies[name] _, results = RunLinearModel(daily) years = np.linspace(0, 5, 101) PlotSimplePrediction(results, years) """ Explanation: Here's what the prediction looks like for the high quality category, using the linear model. End of explanation """ def SimulateResults(daily, iters=101, func=RunLinearModel): """Run simulations based on resampling residuals. daily: DataFrame of daily prices iters: number of simulations func: function that fits a model to the data returns: list of result objects """ _, results = func(daily) fake = daily.copy() result_seq = [] for _ in range(iters): fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid) _, fake_results = func(fake) result_seq.append(fake_results) return result_seq """ Explanation: When we generate predictions, we want to quatify the uncertainty in the prediction. We can do that by resampling. 
The following function fits a model to the data, computes residuals, then resamples from the residuals to general fake datasets. It fits the same model to each fake dataset and returns a list of results. End of explanation """ def GeneratePredictions(result_seq, years, add_resid=False): """Generates an array of predicted values from a list of model results. When add_resid is False, predictions represent sampling error only. When add_resid is True, they also include residual error (which is more relevant to prediction). result_seq: list of model results years: sequence of times (in years) to make predictions for add_resid: boolean, whether to add in resampled residuals returns: sequence of predictions """ n = len(years) d = dict(Intercept=np.ones(n), years=years, years2=years**2) predict_df = pd.DataFrame(d) predict_seq = [] for fake_results in result_seq: predict = fake_results.predict(predict_df) if add_resid: predict += thinkstats2.Resample(fake_results.resid, n) predict_seq.append(predict) return predict_seq """ Explanation: To generate predictions, we take the list of results fitted to resampled data. For each model, we use the predict method to generate predictions, and return a sequence of predictions. If add_resid is true, we add resampled residuals to the predicted values, which generates predictions that include predictive uncertainty (due to random noise) as well as modeling uncertainty (due to random sampling). End of explanation """ def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel): """Plots predictions. daily: DataFrame of daily prices years: sequence of times (in years) to make predictions for iters: number of simulations percent: what percentile range to show func: function that fits a model to the data """ result_seq = SimulateResults(daily, iters=iters, func=func) p = (100 - percent) / 2 percents = p, 100 - p predict_seq = GeneratePredictions(result_seq, years, add_resid=True) low, high = thinkstats2.PercentileRows(predict_seq, percents) thinkplot.FillBetween(years, low, high, alpha=0.3, color="gray") predict_seq = GeneratePredictions(result_seq, years, add_resid=False) low, high = thinkstats2.PercentileRows(predict_seq, percents) thinkplot.FillBetween(years, low, high, alpha=0.5, color="gray") """ Explanation: To visualize predictions, I show a darker region that quantifies modeling uncertainty and a lighter region that quantifies predictive uncertainty. End of explanation """ years = np.linspace(0, 5, 101) thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name) PlotPredictions(daily, years) xlim = years[0] - 0.1, years[-1] + 0.1 thinkplot.Config( title="Predictions", xlabel="Years", xlim=xlim, ylabel="Price per gram ($)" ) """ Explanation: Here are the results for the high quality category. End of explanation """ def SimulateIntervals(daily, iters=101, func=RunLinearModel): """Run simulations based on different subsets of the data. daily: DataFrame of daily prices iters: number of simulations func: function that fits a model to the data returns: list of result objects """ result_seq = [] starts = np.linspace(0, len(daily), iters).astype(int) for start in starts[:-2]: subset = daily[start:] _, results = func(subset) fake = subset.copy() for _ in range(iters): fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid) _, fake_results = func(fake) result_seq.append(fake_results) return result_seq """ Explanation: But there is one more source of uncertainty: how much past data should we use to build the model? 
The following function generates a sequence of models based on different amounts of past data. End of explanation """ def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel): """Plots predictions based on different intervals. daily: DataFrame of daily prices years: sequence of times (in years) to make predictions for iters: number of simulations percent: what percentile range to show func: function that fits a model to the data """ result_seq = SimulateIntervals(daily, iters=iters, func=func) p = (100 - percent) / 2 percents = p, 100 - p predict_seq = GeneratePredictions(result_seq, years, add_resid=True) low, high = thinkstats2.PercentileRows(predict_seq, percents) thinkplot.FillBetween(years, low, high, alpha=0.2, color="gray") """ Explanation: And this function plots the results. End of explanation """ name = "high" daily = dailies[name] thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name) PlotIntervals(daily, years) PlotPredictions(daily, years) xlim = years[0] - 0.1, years[-1] + 0.1 thinkplot.Config( title="Predictions", xlabel="Years", xlim=xlim, ylabel="Price per gram ($)" ) """ Explanation: Here's what the high quality category looks like if we take into account uncertainty about how much past data to use. End of explanation """ name = "high" daily = dailies[name] filled = FillMissing(daily) diffs = filled.ppg.diff() thinkplot.plot(diffs) plt.xticks(rotation=30) thinkplot.Config(ylabel="Daily change in price per gram ($)") filled["slope"] = diffs.ewm(span=365).mean() thinkplot.plot(filled.slope[-365:]) plt.xticks(rotation=30) thinkplot.Config(ylabel="EWMA of diff ($)") # extract the last inter and the mean of the last 30 slopes start = filled.index[-1] inter = filled.ewma[-1] slope = filled.slope[-30:].mean() start, inter, slope # reindex the DataFrame, adding a year to the end dates = pd.date_range(filled.index.min(), filled.index.max() + np.timedelta64(365, "D")) predicted = filled.reindex(dates) # generate predicted values and add them to the end predicted["date"] = predicted.index one_day = np.timedelta64(1, "D") predicted["days"] = (predicted.date - start) / one_day predict = inter + slope * predicted.days predicted.ewma.fillna(predict, inplace=True) # plot the actual values and predictions thinkplot.Scatter(daily.ppg, alpha=0.1, label=name) thinkplot.Plot(predicted.ewma, color="#ff7f00") """ Explanation: Exercises Exercise: The linear model I used in this chapter has the obvious drawback that it is linear, and there is no reason to expect prices to change linearly over time. We can add flexibility to the model by adding a quadratic term, as we did in Section 11.3. Use a quadratic model to fit the time series of daily prices, and use the model to generate predictions. You will have to write a version of RunLinearModel that runs that quadratic model, but after that you should be able to reuse code from the chapter to generate predictions. Exercise: Write a definition for a class named SerialCorrelationTest that extends HypothesisTest from Section 9.2. It should take a series and a lag as data, compute the serial correlation of the series with the given lag, and then compute the p-value of the observed correlation. Use this class to test whether the serial correlation in raw price data is statistically significant. Also test the residuals of the linear model and (if you did the previous exercise), the quadratic model. Bonus Example: There are several ways to extend the EWMA model to generate predictions. 
One of the simplest is something like this:

1. Compute the EWMA of the time series and use the last point as an intercept, inter.
2. Compute the EWMA of differences between successive elements in the time series and use the last point as a slope, slope.
3. To predict values at future times, compute inter + slope * dt, where dt is the difference between the time of the prediction and the time of the last observation.

End of explanation
"""
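For the quadratic-model exercise above, a minimal sketch of the fitting function is given below. It assumes the same setup as RunLinearModel in this chapter (a daily DataFrame with years and ppg columns) and pulls in statsmodels' formula API itself; the name RunQuadraticModel is only a suggestion, not part of the original notebook.

```python
import statsmodels.formula.api as smf

def RunQuadraticModel(daily):
    # daily: DataFrame of daily prices with `years` and `ppg` columns
    # returns: (model, results) from statsmodels, mirroring RunLinearModel
    daily['years2'] = daily.years**2               # add the squared term by name
    model = smf.ols('ppg ~ years + years2', data=daily)
    results = model.fit()
    return model, results
```

Because GeneratePredictions already builds its prediction frame with Intercept, years, and years2 columns, this function should drop straight into the same machinery, for example PlotPredictions(daily, years, func=RunQuadraticModel).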
IBMDecisionOptimization/docplex-examples
examples/cp/jupyter/truck_fleet.ipynb
apache-2.0
from sys import stdout try: import docplex.cp except: if hasattr(sys, 'real_prefix'): #we are in a virtual env. !pip install docplex else: !pip install --user docplex """ Explanation: The Truck Fleet puzzle This tutorial includes everything you need to set up decision optimization engines, build constraint programming models. When you finish this tutorial, you'll have a foundational knowledge of Prescriptive Analytics. This notebook is part of Prescriptive Analytics for Python It requires either an installation of CPLEX Optimizers or it can be run on IBM Cloud Pak for Data as a Service (Sign up for a free IBM Cloud account and you can start using IBM Cloud Pak for Data as a Service right away). CPLEX is available on <i>IBM Cloud Pack for Data</i> and <i>IBM Cloud Pak for Data as a Service</i>: - <i>IBM Cloud Pak for Data as a Service</i>: Depends on the runtime used: - <i>Python 3.x</i> runtime: Community edition - <i>Python 3.x + DO</i> runtime: full edition - <i>Cloud Pack for Data</i>: Community edition is installed by default. Please install DO addon in Watson Studio Premium for the full edition Table of contents: Describe the business problem How decision optimization (prescriptive analytics) can help Use decision optimization Step 1: Download the library Step 2: Model the Data Step 3: Set up the prescriptive model Prepare data for modeling Define the decision variables Express the business constraints Express the objective Solve with Decision Optimization solve service Step 4: Investigate the solution and run an example analysis Summary Describe the business problem The problem is to deliver some orders to several clients with a single truck. Each order consists of a given quantity of a product of a certain type. A product type is an integer in {0, 1, 2}. Loading the truck with at least one product of a given type requires some specific installations. The truck can be configured in order to handle one, two or three different types of product. There are 7 different configurations for the truck, corresponding to the 7 possible combinations of product types: configuration 0: all products are of type 0, configuration 1: all products are of type 1, configuration 2: all products are of type 2, configuration 3: products are of type 0 or 1, configuration 4: products are of type 0 or 2, configuration 5: products are of type 1 or 2, configuration 6: products are of type 0 or 1 or 2. The cost for configuring the truck from a configuration A to a configuration B depends on A and B. The configuration of the truck determines its capacity and its loading cost. A delivery consists of loading the truck with one or several orders for the same customer. Both the cost (for configuring and loading the truck) and the number of deliveries needed to deliver all the orders must be minimized, the cost being the most important criterion. Please refer to documentation for appropriate setup of solving configuration. How decision optimization can help Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes. Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. 
Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. <br/> For example: Automate complex decisions and trade-offs to better manage limited resources. Take advantage of a future opportunity or mitigate a future risk. Proactively update recommendations based on changing events. Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Use decision optimization Step 1: Download the library Run the following code to install Decision Optimization CPLEX Modeling library. The DOcplex library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier. End of explanation """ from docplex.cp.model import * # List of possible truck configurations. Each tuple is (load, cost) with: # load: max truck load for this configuration, # cost: cost for loading the truck in this configuration TRUCK_CONFIGURATIONS = ((11, 2), (11, 2), (11, 2), (11, 3), (10, 3), (10, 3), (10, 4)) # List of customer orders. # Each tuple is (customer index, volume, product type) CUSTOMER_ORDERS = ((0, 3, 1), (0, 4, 2), (0, 3, 0), (0, 2, 1), (0, 5, 1), (0, 4, 1), (0, 11, 0), (1, 4, 0), (1, 5, 0), (1, 2, 0), (1, 4, 2), (1, 7, 2), (1, 3, 2), (1, 5, 0), (1, 2, 2), (2, 5, 1), (2, 6, 0), (2, 11, 2), (2, 1, 0), (2, 6, 0), (2, 3, 0)) # Transition costs between configurations. # Tuple (A, B, TCost) means that the cost of modifying the truck from configuration A to configuration B is TCost CONFIGURATION_TRANSITION_COST = tuple_set(((0, 0, 0), (0, 1, 0), (0, 2, 0), (0, 3, 10), (0, 4, 10), (0, 5, 10), (0, 6, 15), (1, 0, 0), (1, 1, 0), (1, 2, 0), (1, 3, 10), (1, 4, 10), (1, 5, 10), (1, 6, 15), (2, 0, 0), (2, 1, 0), (2, 2, 0), (2, 3, 10), (2, 4, 10), (2, 5, 10), (2, 6, 15), (3, 0, 3), (3, 1, 3), (3, 2, 3), (3, 3, 0), (3, 4, 10), (3, 5, 10), (3, 6, 15), (4, 0, 3), (4, 1, 3), (4, 2, 3), (4, 3, 10), (4, 4, 0), (4, 5, 10), (4, 6, 15), (5, 0, 3), (5, 1, 3), (5, 2, 3), (5, 3, 10), (5, 4, 10), (5, 5, 0), (5, 6, 15), (6, 0, 3), (6, 1, 3), (6, 2, 3), (6, 3, 10), (6, 4, 10), (6, 5, 10), (6, 6, 0) )) # Compatibility between the product types and the configuration of the truck # allowedContainerConfigs[i] = the array of all the configurations that accept products of type i ALLOWED_CONTAINER_CONFIGS = ((0, 3, 4, 6), (1, 3, 5, 6), (2, 4, 5, 6)) """ Explanation: Note that the more global package <i>docplex</i> contains another subpackage <i>docplex.mp</i> that is dedicated to Mathematical Programming, another branch of optimization. Step 2: Model the data Next section defines the data of the problem. End of explanation """ nbTruckConfigs = len(TRUCK_CONFIGURATIONS) maxTruckConfigLoad = [tc[0] for tc in TRUCK_CONFIGURATIONS] truckCost = [tc[1] for tc in TRUCK_CONFIGURATIONS] maxLoad = max(maxTruckConfigLoad) nbOrders = len(CUSTOMER_ORDERS) nbCustomers = 1 + max(co[0] for co in CUSTOMER_ORDERS) volumes = [co[1] for co in CUSTOMER_ORDERS] productType = [co[2] for co in CUSTOMER_ORDERS] # Max number of truck deliveries (estimated upper bound, to be increased if no solution) maxDeliveries = 15 """ Explanation: Step 3: Set up the prescriptive model Prepare data for modeling Next section extracts from problem data the parts that are frequently used in the modeling section. 
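As a quick sanity check on that guess for maxDeliveries, it can help to compare the total ordered volume with the most the truck could possibly carry in that many trips. This is only a rough bound (it ignores the one-customer-per-delivery rule), but it catches an obviously too-small value early; a small sketch using only the quantities defined above:

```python
# Rough feasibility check for the maxDeliveries upper bound: the total ordered
# volume must fit in at most maxDeliveries loads of the largest configuration.
total_volume = sum(volumes)
best_case_capacity = maxDeliveries * max(maxTruckConfigLoad)
print("total ordered volume:", total_volume)
print("best-case capacity over {} deliveries: {}".format(maxDeliveries, best_case_capacity))
assert total_volume <= best_case_capacity, "maxDeliveries is certainly too small"
```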
End of explanation """ mdl = CpoModel(name="trucks") """ Explanation: Create CPO model End of explanation """ # Configuration of the truck for each delivery truckConfigs = integer_var_list(maxDeliveries, 0, nbTruckConfigs - 1, "truckConfigs") # In which delivery is an order where = integer_var_list(nbOrders, 0, maxDeliveries - 1, "where") # Load of a truck load = integer_var_list(maxDeliveries, 0, maxLoad, "load") # Number of deliveries that are required nbDeliveries = integer_var(0, maxDeliveries) # Identification of which customer is assigned to a delivery customerOfDelivery = integer_var_list(maxDeliveries, 0, nbCustomers, "customerOfTruck") # Transition cost for each delivery transitionCost = integer_var_list(maxDeliveries - 1, 0, 1000, "transitionCost") """ Explanation: Define the decision variables End of explanation """ # transitionCost[i] = transition cost between configurations i and i+1 for i in range(1, maxDeliveries): auxVars = (truckConfigs[i - 1], truckConfigs[i], transitionCost[i - 1]) mdl.add(allowed_assignments(auxVars, CONFIGURATION_TRANSITION_COST)) # Constrain the volume of the orders in each truck mdl.add(pack(load, where, volumes, nbDeliveries)) for i in range(0, maxDeliveries): mdl.add(load[i] <= element(truckConfigs[i], maxTruckConfigLoad)) # Compatibility between the product type of an order and the configuration of its truck for j in range(0, nbOrders): configOfContainer = integer_var(ALLOWED_CONTAINER_CONFIGS[productType[j]]) mdl.add(configOfContainer == element(truckConfigs, where[j])) # Only one customer per delivery for j in range(0, nbOrders): mdl.add(element(customerOfDelivery, where[j]) == CUSTOMER_ORDERS[j][0]) # Non-used deliveries are at the end for j in range(1, maxDeliveries): mdl.add((load[j - 1] > 0) | (load[j] == 0)) # Dominance: the non used deliveries keep the last used configuration mdl.add(load[0] > 0) for i in range(1, maxDeliveries): mdl.add((load[i] > 0) | (truckConfigs[i] == truckConfigs[i - 1])) # Dominance: regroup deliveries with same configuration for i in range(maxDeliveries - 2, 0, -1): ct = true() for p in range(i + 1, maxDeliveries): ct = (truckConfigs[p] != truckConfigs[i - 1]) & ct mdl.add((truckConfigs[i] == truckConfigs[i - 1]) | ct) """ Explanation: Express the business constraints End of explanation """ # Objective: first criterion for minimizing the cost for configuring and loading trucks # second criterion for minimizing the number of deliveries cost = sum(transitionCost) + sum(element(truckConfigs[i], truckCost) * (load[i] != 0) for i in range(maxDeliveries)) mdl.add(minimize_static_lex([cost, nbDeliveries])) """ Explanation: Express the objective End of explanation """ # Search strategy: first assign order to truck mdl.set_search_phases([search_phase(where)]) # Solve model print("\nSolving model....") msol = mdl.solve(TimeLimit=20) """ Explanation: Solve with Decision Optimization solve service End of explanation """ if msol.is_solution(): print("Solution: ") ovals = msol.get_objective_values() print(" Configuration cost: {}, number of deliveries: {}".format(ovals[0], ovals[1])) for i in range(maxDeliveries): ld = msol.get_value(load[i]) if ld > 0: stdout.write(" Delivery {:2d}: config={}".format(i,msol.get_value(truckConfigs[i]))) stdout.write(", items=") for j in range(nbOrders): if (msol.get_value(where[j]) == i): stdout.write(" <{}, {}, {}>".format(j, productType[j], volumes[j])) stdout.write('\n') else: stdout.write("Solve status: {}\n".format(msol.get_solve_status())) """ Explanation: Step 4: Investigate the solution 
and then run an example analysis End of explanation """
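One small example of such an analysis is sketched below. It only reuses objects already defined above (msol, where, nbOrders, nbCustomers and CUSTOMER_ORDERS) and tallies how much volume each customer receives and across how many deliveries; it is a suggestion for further exploration, not part of the original sample.

```python
if msol.is_solution():
    volume_per_customer = [0] * nbCustomers
    deliveries_per_customer = [set() for _ in range(nbCustomers)]
    for j in range(nbOrders):
        customer, volume, _ = CUSTOMER_ORDERS[j]
        delivery = msol.get_value(where[j])       # which delivery carries order j
        volume_per_customer[customer] += volume
        deliveries_per_customer[customer].add(delivery)
    for c in range(nbCustomers):
        print("Customer {}: {} units over {} deliveries".format(
            c, volume_per_customer[c], len(deliveries_per_customer[c])))
```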
davicsilva/dsintensive
notebooks/eda-miniprojects/human_temp/sliderule_dsi_inferential_statistics_exercise_1.ipynb
apache-2.0
import pandas as pd df = pd.read_csv('data/human_body_temperature.csv') # Your work here. # Load Matplotlib + Seaborn and SciPy libraries import matplotlib.pyplot as plt import seaborn as sns import numpy as np from scipy import stats from scipy.stats import norm from statsmodels.stats.weightstats import ztest %matplotlib inline df.head(5) """ Explanation: What is the True Normal Human Body Temperature? Background The mean normal body temperature was held to be 37$^{\circ}$C or 98.6$^{\circ}$F for more than 120 years since it was first conceptualized and reported by Carl Wunderlich in a famous 1868 book. But, is this value statistically correct? <h3>Exercises</h3> <p>In this exercise, you will analyze a dataset of human body temperatures and employ the concepts of hypothesis testing, confidence intervals, and statistical significance.</p> <p>Answer the following questions <b>in this notebook below and submit to your Github account</b>.</p> <ol> <li> Is the distribution of body temperatures normal? <ul> <li> Although this is not a requirement for CLT to hold (read CLT carefully), it gives us some peace of mind that the population may also be normally distributed if we assume that this sample is representative of the population. </ul> <li> Is the sample size large? Are the observations independent? <ul> <li> Remember that this is a condition for the CLT, and hence the statistical tests we are using, to apply. </ul> <li> Is the true population mean really 98.6 degrees F? <ul> <li> Would you use a one-sample or two-sample test? Why? <li> In this situation, is it appropriate to use the $t$ or $z$ statistic? <li> Now try using the other test. How is the result be different? Why? </ul> <li> Draw a small sample of size 10 from the data and repeat both tests. <ul> <li> Which one is the correct one to use? <li> What do you notice? What does this tell you about the difference in application of the $t$ and $z$ statistic? </ul> <li> At what temperature should we consider someone's temperature to be "abnormal"? <ul> <li> Start by computing the margin of error and confidence interval. </ul> <li> Is there a significant difference between males and females in normal temperature? <ul> <li> What test did you use and why? <li> Write a story with your conclusion in the context of the original problem. </ul> </ol> You can include written notes in notebook cells using Markdown: - In the control panel at the top, choose Cell > Cell Type > Markdown - Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet Resources Information and data sources: http://www.amstat.org/publications/jse/datasets/normtemp.txt, http://www.amstat.org/publications/jse/jse_data_archive.htm Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet End of explanation """ ax = sns.distplot(df[['temperature']], rug=True, axlabel='Temperature (o F)') """ Explanation: Questions and Answers 1. Is the distribution of body temperatures normal? Yes. Based on the shape of the curve plotted with sample data, we have a normal distribution of body temperature. End of explanation """ print("Yes. 
We have *" + str(df['temperature'].size) + "* records in the sample data file.") print("There is no connection or dependence between the measured temperature values, in other words, the observations are independent.") # Sample (dataset) size df['temperature'].describe() # Population mean temperature POP_MEAN = 98.6 # Sample size, mean and standard deviation sample_size = df['temperature'].count() sample_mean = df['temperature'].mean() sample_std = df['temperature'].std(axis=0) """ Explanation: 2. Is the sample size large? Are the observations independent? Sample size End of explanation """ print("Population mean temperature (given): POP_MEAN = " + str(POP_MEAN)) print("Sample size: sample_size = " + str(sample_size)) print("Sample mean: sample_mean = "+ str(sample_mean)) print("Sample standard deviation: sample_std = "+ str(sample_std)) """ Explanation: What we know about population and what we get from sample dataset<br> End of explanation """ print("* Ho or Null hypothesis: Average body temperature *is* " + str(POP_MEAN)+" degrees F.") print("* Ha or Alternative hypothesis: Average body temperature *is not* " + str(POP_MEAN)+" degrees F.") """ Explanation: 3. Is the true population mean really 98.6 degrees F? Hypothesis: End of explanation """ t = ((sample_mean - POP_MEAN)/sample_std)*np.sqrt(sample_size) print("t = " + str(t)) """ Explanation: t-test formula: where: x = sample mean uo = population mean s = sample standard deviation n = sample size t test t = ((sample_mean - population_mean)/ sample_std_deviation ) * sqrt(sample_size) End of explanation """ degree = sample_size - 1 print("degrees of freedom =" + str(degree)) """ Explanation: degrees of freedom End of explanation """ p = 1 - stats.t.cdf(abs(t),df=degree) print("p-value = %.10f" % p) """ Explanation: p-value End of explanation """ p2 = 2*p print("p-value = %.10f (2 * p-value)" % p2) """ Explanation: 2 * p-value is the new p-value: End of explanation """ ALFA = 0.05 print(". alfa = " + str(ALFA)) print(". p-value = %.10f" % p2) """ Explanation: We assume that: Significant level (alfa) = 0.05 (cutoff level) End of explanation """ print("----") print(". Sample mean: sample_mean = "+ str(sample_mean)) print(". Population mean temperature (given): POP_MEAN = " + str(POP_MEAN)) print(". Population standard deviation: sample_std = "+ str(sample_std)) print(". Sample size: sample_size = " + str(sample_size)) print("----") """ Explanation: ---- The smaller the p-value, the greater the evidence against the Ho (Null Hypothesis); we found a p-value <= alfa, therefore, we do not have strong evidence to accept the Null Hypothesis (Ho); So, we can assume da Ha (alternative hypothesis): average body temperature is not 98.6 degrees F. ---- a) Would you use a one-sample or two-sample test? Why? Two-sample test, once we want to know if the result is different of a reference value: 98.6 degrees F. b) In this situation, is it appropriate to use the t or z statistic? Once we do not know the population standard deviation, it is appropriate to use t statistic.<br> c) Now try using the other test. How is the result be different? Why? 
The equation for Z statistic is: Assuming that population standard deviation = sample standard deviation, we have: End of explanation """ z = ((sample_mean - POP_MEAN)/sample_std)*np.sqrt(sample_size) print("Z value or z_score: z = " + str(z)) """ Explanation: Z test Z = ((sample_mean - population_mean)/ population_std_deviation ) * sqrt(sample_size) <br> Note: we are assuming that population standard deviation = sample standard deviation (sample_std)<br> End of explanation """ # P-Value two sided p_value_z = 1 - (norm.sf(abs(z))*2) print("P-Value = %.15f" % p_value_z) """ Explanation: p-value End of explanation """ ALFA = 0.05 print(". alfa = " + str(ALFA)) print(". p-value = %.15f" % p_value_z) """ Explanation: We (also) assume that: Significant level (alfa) = 0.05 (cutoff level) End of explanation """ # A sample with randomly 10 records from original dataset df_sample10 = df.sample(n=10) df_sample10['temperature'].count() """ Explanation: --------------------------------------------------------------------------------------------------------------------------------- The smaller the p-value, the greater the evidence against the Ho (Null Hypothesis); we found a p-value > alfa, therefore, we do have strong evidence to accept the Null Hypothesis (Ho): - in this case, the average body temperature is 98.6 degrees F. --------------------------------------------------------------------------------------------------------------------------------- 4. Draw a small sample of size 10 from the data and repeat both tests. End of explanation """ ax = sns.distplot(df_sample10[['temperature']], rug=True, axlabel='Temperature (o F)') """ Explanation: The histogram: End of explanation """ sample10_size = df_sample10['temperature'].count() sample10_mean = df_sample10['temperature'].mean() sample10_std = df_sample10['temperature'].std(axis=0) print("Population mean temperature (given): POP_MEAN = " + str(POP_MEAN)) print("Sample-10 size: sample_size = " + str(sample10_size)) print("Sample-10 mean: sample_mean = "+ str(sample10_mean)) print("Sample-10 standard deviation: sample_std = "+ str(sample10_std)) """ Explanation: Sample size, mean and standard deviation End of explanation """ t = ((sample10_mean - POP_MEAN)/sample10_std)*np.sqrt(sample10_size) print("t = " + str(t)) """ Explanation: t-test formula: where: x = sample mean uo = population mean s = sample standard deviation n = sample size t test t = ((sample_mean - population_mean)/ sample_std_deviation ) * sqrt(sample_size) End of explanation """ degree = sample10_size - 1 print("degrees of freedom =" + str(degree)) """ Explanation: degrees of freedom End of explanation """ p_value = 1 - stats.t.cdf(abs(t),df=degree) # p-value considering two-tails p_value = 2*p_value print("p-value =" + str(p_value)) """ Explanation: p-value End of explanation """ ALFA = 0.05 print(". alfa = " + str(ALFA)) print(". p-value = %.15f" % p_value) """ Explanation: We (also) assume that: Significant level (alfa) = 0.05 (cutoff level) End of explanation """ z = ((sample10_mean - POP_MEAN)/sample10_std)*np.sqrt(sample10_size) print("Z value or z_score: z = " + str(z)) # P-Value two sided p_value_z = 1 - (norm.sf(abs(z))*2) print("P-Value = %.15f" % p_value_z) """ Explanation: ---- The smaller the p-value, the greater the evidence against the Ho (Null Hypothesis); we found a p-value > alfa, therefore, we do have strong evidence to accept the Null Hypothesis (Ho): - the average body temperature is 98.6 degrees F. 
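A compact way to see why the t and z statistics can lead to different conclusions at this sample size is to compare their two-sided 5% critical values (a small sketch; stats and norm are already imported at the top of this notebook):

```python
# With only 10 observations the t distribution has noticeably heavier tails,
# so a larger observed statistic is needed to reach significance.
print("two-sided 5% critical value, t (df = 9):", stats.t.ppf(0.975, df=9))
print("two-sided 5% critical value, z:         ", norm.ppf(0.975))
```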
---- Z test Z = ((sample_mean - population_mean)/ population_std_deviation ) * sqrt(sample_size) <br> Note: we are assuming that population standard deviation = sample standard deviation (sample10_std)<br> End of explanation """ ALFA = 0.05 print(". alfa = " + str(ALFA)) print(". p-value = %.15f" % p_value_z) """ Explanation: We (also) assume that: Significant level (alfa) = 0.05 (cutoff level) End of explanation """ # Sample (dataset) size df['temperature'].describe() median = df['temperature'].mean() std = df['temperature'].std(axis=0) print("One standard deviation (std) is %.3f degrees F." %std) print("Three standard deviation (std) is %.3f degrees F." % (3*std)) """ Explanation: --------------------------------------------------------------------------------------------------------------------------------- The smaller the p-value, the greater the evidence against the Ho (Null Hypothesis); we found a p-value > alfa, therefore, we do have strong evidence to accept the Null Hypothesis (Ho): - for this sample, the average body temperature IS 98.6 degrees F. --------------------------------------------------------------------------------------------------------------------------------- 5. At what temperature should we consider someone's temperature to be "abnormal"? Normal curve (bell curve) standard deviation percentils: We can consider "abnormal" those people that have body temperature different than 99.7% of the population. In other words, those whose temperature is 3-std (standard deviation) far from the mean. From the original dataset we have: End of explanation """ lim_low = median - (3*std) lim_high = median + (3*std) print("A body temperature different than 99.7% of the population is: greater than "+ str(lim_high) + " and less than " + str(lim_low) + " degrees F.") """ Explanation: So, a "abnormal" body temperature is between -3std and +3std: End of explanation """ # Female temperature (mean and standard deviation) df_female = df.loc[df['gender'] == 'F'] ax = sns.distplot(df_female[['temperature']]) print("Female temperature: mean = %f | std = %f" % (df_female['temperature'].mean(), df_female['temperature'].std())) # Male temperature (mean and standard deviation) df_male = df.loc[df['gender'] == 'M'] ax = sns.distplot(df_male[['temperature']]) print("Male temperature: mean = %f | std = %f" % (df_male['temperature'].mean(), df_male['temperature'].std())) # Plotting histogram based on gender (Female/Male) grid = sns.FacetGrid(df, col="gender") grid.map(plt.hist, "temperature", color="y") # Plotting Female/Male temperatures using Seaborn Pairplot sns.pairplot(df, hue='gender', size=2.5) """ Explanation: 6. Is there a significant difference between males and females in normal temperature? End of explanation """
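To put a number on question 6, a minimal sketch using the df_female and df_male frames created above is shown here. It applies scipy's two-sample t-test; the unequal-variance (Welch) form is a deliberately conservative choice and is an addition to the original analysis:

```python
# Two-sample (Welch) t-test for a difference in mean body temperature by gender.
t_stat, p_val = stats.ttest_ind(df_female['temperature'],
                                df_male['temperature'],
                                equal_var=False)
print("t statistic = %.3f, p-value = %.4f" % (t_stat, p_val))
# With the alfa = 0.05 cutoff used throughout this notebook, a p-value below 0.05
# would indicate a statistically significant difference between the two groups.
```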
tensorflow/docs-l10n
site/ko/guide/function.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ import tensorflow as tf """ Explanation: tf.function으로 성능 향상하기 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/function"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org 에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/function.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Google Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/function.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃헙(GitHub) 소스 보기</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/function.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 tensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 문서 번역이나 리뷰에 참여하려면 docs-ko@tensorflow.org로 메일을 보내주시기 바랍니다. 텐서플로 2에서는 즉시 실행(eager execution)이 기본적으로 활성화되어 있습니다. 직관적이고 유연한 사용자 인터페이스를 제공하지만 성능과 배포에 비용이 더 듭니다(하나의 연산을 실행할 때는 훨씬 간단하고 빠릅니다). 성능을 높이고 이식성이 좋은 모델을 만들려면 tf.function을 사용해 그래프로 변환하세요. 하지만 조심해야 할 점이 있습니다. tf.function은 무조건 속도를 높여주는 마법의 은총알이 아닙니다! 이 가이드는 tf.function의 이면에 있는 개념을 이해하고 사용법을 완전히 터득할 수 있도록 도울 것입니다. 여기서 배울 주요 내용과 권고 사항은 다음과 같습니다: 즉시 실행 모드에서 디버깅한 다음 @tf.function으로 데코레이팅하세요. 객체 변경(object mutation)이나 리스트 요소 추가 같은 파이썬의 부수 효과에 의존하지 마세요. tf.function은 텐서플로 연산과 가장 잘 동작합니다: 넘파이와 파이썬 호출은 상수로 바뀝니다. 설정 End of explanation """ import traceback import contextlib # 에러 출력을 위한 헬퍼 함수 @contextlib.contextmanager def assert_raises(error_class): try: yield except error_class as e: print('기대하는 예외 발생 \n {}:'.format(error_class)) traceback.print_exc(limit=2) except Exception as e: raise e else: raise Exception('{}를 기대했지만 아무런 에러도 발생되지 않았습니다!'.format( error_class)) """ Explanation: 에러 출력을 위한 헬퍼 함수를 정의합니다: End of explanation """ @tf.function def add(a, b): return a + b add(tf.ones([2, 2]), tf.ones([2, 2])) # [[2., 2.], [2., 2.]] v = tf.Variable(1.0) with tf.GradientTape() as tape: result = add(v, 1.0) tape.gradient(result, v) """ Explanation: 기초 tf.function으로 정의한 함수는 기본 텐서플로 연산과 같습니다. 즉시 실행 모드로 실행하거나 그레이디언트를 계산할 수 있습니다. End of explanation """ @tf.function def dense_layer(x, w, b): return add(tf.matmul(x, w), b) dense_layer(tf.ones([3, 2]), tf.ones([2, 2]), tf.ones([2])) """ Explanation: 다른 함수 내부에 사용할 수 있습니다. 
End of explanation """ import timeit conv_layer = tf.keras.layers.Conv2D(100, 3) @tf.function def conv_fn(image): return conv_layer(image) image = tf.zeros([1, 200, 200, 100]) # 워밍 업 conv_layer(image); conv_fn(image) print("즉시 실행 합성곱:", timeit.timeit(lambda: conv_layer(image), number=10)) print("tf.function 합성곱:", timeit.timeit(lambda: conv_fn(image), number=10)) print("합성곱 연산 속도에 큰 차이가 없습니다.") """ Explanation: tf.function은 즉시 실행 모드 보다 빠릅니다. 특히 그래프에 작은 연산이 많을 때 그렇습니다. 하지만 (합성곱처럼) 계산량이 많은 연산 몇 개로 이루어진 그래프는 속도 향상이 크지 않습니다. End of explanation """ # 함수와 다형성 @tf.function def double(a): print("트레이싱:", a) return a + a print(double(tf.constant(1))) print() print(double(tf.constant(1.1))) print() print(double(tf.constant("a"))) print() """ Explanation: 디버깅 일반적으로 tf.function 보다 즉시 실행 모드가 디버깅하기 쉽습니다. tf.function으로 데코레이팅하기 전에 즉시 실행 모드에서 에러가 없는지 확인하세요. 디버깅 과정을 위해 tf.config.run_functions_eagerly(True)으로 전체 tf.function을 비활성화하고 나중에 다시 활성화할 수 있습니다. tf.function 함수에서 버그를 추적할 때 다음 팁을 참고하세요: - 파이썬 print 함수는 트레이싱(tracing)하는 동안에만 호출되므로 함수가 (재)트레이싱될 때 추적하는데 도움이 됩니다. - tf.print 함수는 언제나 실행되므로 실행하는 동안 중간 값을 추적할 때 도움이 됩니다. - tf.debugging.enable_check_numerics을 사용하면 쉽게 NaN과 Inf가 발생되는 곳을 추적할 수 있습니다. - pdb는 어떻게 트레이싱이 일어나는지 이해하는데 도움이 됩니다(주의: pdb는 오토그래프(AutoGraph)가 변환한 소스 코드를 보여줄 것입니다). 트레이싱과 다형성 파이썬의 동적 타이핑 덕분에 여러 종류의 매개변수 타입을 사용해 함수를 호출할 수 있고 파이썬은 각기 다르게 수행됩니다. 반면 텐서플로 그래프는 정적인 dtype과 shape 차원이 필요합니다. tf.function은 올바른 그래프를 생성하기 위해 필요하면 함수를 다시 트레이싱하여 이 문제를 해결합니다. tf.function을 사용할 때 발생하는 문제점은 대부분 이런 재트레이싱(retracing) 동작에서 옵니다. 다른 종류의 매개변수를 함수를 호출할 때 무슨 일이 일어나는지 확인해 보죠. End of explanation """ def f(): print('트레이싱!') tf.print('실행') tf.function(f)() tf.function(f)() """ Explanation: 트레이싱 동작을 제어하기 위해 다음 기법을 사용할 수 있습니다: 새로운 tf.function을 만듭니다. 별도의 tf.function 객체는 트레이싱이 따로 일어납니다. End of explanation """ print("콘크리트 함수 얻기") double_strings = double.get_concrete_function(tf.TensorSpec(shape=None, dtype=tf.string)) print("트레이싱된 함수 실행") print(double_strings(tf.constant("a"))) print(double_strings(a=tf.constant("b"))) print("콘크리트 함수에 다른 타입을 사용하면 예외가 발생합니다") with assert_raises(tf.errors.InvalidArgumentError): double_strings(tf.constant(1)) """ Explanation: get_concrete_function 메서드를 사용해 트레이싱된 특정 함수를 얻을 수 있습니다. End of explanation """ @tf.function(input_signature=(tf.TensorSpec(shape=[None], dtype=tf.int32),)) def next_collatz(x): print("트레이싱", x) return tf.where(x % 2 == 0, x // 2, 3 * x + 1) print(next_collatz(tf.constant([1, 2]))) # input_signature에 1-D 텐서를 지정했기 때문에 다음은 실패합니다. with assert_raises(ValueError): next_collatz(tf.constant([[1, 2], [3, 4]])) """ Explanation: tf.function에 input_signature를 지정하여 트레이싱을 제한할 수도 있습니다. End of explanation """ def train_one_step(): pass @tf.function def train(num_steps): print("트레이싱 num_steps = {}".format(num_steps)) for _ in tf.range(num_steps): train_one_step() train(num_steps=10) train(num_steps=20) """ Explanation: 언제 다시 트레이싱되나요? 다형성을 지원하는 tf.function은 트레이싱으로 생성된 콘크리트 함수를 캐싱합니다. 이 캐시의 키는 함수의 위치 매개변수(args)와 키워드 매개변수(kwargs)에서 생성된 키의 튜플입니다. tf.Tensor 매개변수를 위해 생성된 키는 차원 개수와 타입이 됩니다. 파이썬 기본 자료형(정수, 실수, 문자열, 불리언)으로 생성된 키는 해당 변수의 값이 됩니다. 그외 다른 파이썬 타입에서 키는 id()를 기반으로 합니다. 따라서 클래스 메서드는 인스턴스마다 독립적으로 트레이싱됩니다. 향후 텐서플로는 파이썬 객체를 안전하게 텐서로 변환하기 위한 고급 캐싱 기능을 제공할 수 있습니다. 콘크리트 함수를 참고하세요. 파이썬 매개변수 vs 텐서 매개변수 하이퍼파라미터 조작하고 그래프를 구성하기 위해 파이썬 매개변수가 자주 사용됩니다. 예를 들면 num_layers=10이나 training=True, nonlinearity='relu'입니다. 파이썬 매개변수가 바뀌면 그래프가 다시 트레이싱됩니다. 하지만 파이썬 매개변수가 그래프 구성에 사용되지 않을 수 있습니다. 이런 경우 파이썬 값이 변하면 불필요한 재트레이싱을 일으킵니다. 예를 들어 다음은 오토그래프가 동적으로 펼치는 훈련 반복 루프입니다. 다중 트레이싱이 되었지만 생성된 그래프는 실제로 동일하기 때문에 조금 비효율적입니다. 
End of explanation """ train(num_steps=tf.constant(10)) train(num_steps=tf.constant(20)) """ Explanation: 이를 해결하는 간단한 방법은 생성된 그래프에 영향을 미치지 않도록 매개변수를 Tensor로 바꾸는 것입니다. End of explanation """ @tf.function def f(x): print("트레이싱", x) tf.print("실행", x) f(1) f(1) f(2) """ Explanation: tf.function의 부수 효과 일반적으로 (출력이나 객체 변경 같은) 파이썬 부수 효과(side effect)는 트레이싱 동안에만 일어납니다. 어떻게 tf.function에서 안정적으로 부수 효과를 일으킬 수 있을까요? 일반적인 규칙은 파이썬 부수 효과만을 사용하여 트레이싱을 디버깅하는 것입니다. 그외에는 tf.Variable.assign, tf.print, tf.summary 같은 텐서플로 연산이 텐서플로 런타임에 의해 코드가 트레이싱되고 실행되는지 확인하는 가장 좋은 방법입니다. 일반적으로 함수 스타일을 사용하는 것이 가장 좋습니다. End of explanation """ external_list = [] def side_effect(x): print('파이썬 부수 효과') external_list.append(x) @tf.function def f(x): tf.py_function(side_effect, inp=[x], Tout=[]) f(1) f(1) f(1) assert len(external_list) == 3 # py_function이 1을 tf.constant(1)로 바꾸기 때문에 .numpy()를 호출해야 합니다. assert external_list[0].numpy() == 1 """ Explanation: tf.function을 호출할 때마다 파이썬 코드를 실행하려면 tf.py_function이 해결책입니다. tf.py_function의 단점은 이식성과 성능이 좋지 않고 분산 환경(다중 GPU나 다중 TPU)에서 잘 동작하지 않는다는 것입니다. 또한 tf.py_function은 미분 가능하도록 그래프를 만들기 때문에 모든 입력/출력을 텐서로 변환합니다. End of explanation """ external_var = tf.Variable(0) @tf.function def buggy_consume_next(iterator): external_var.assign_add(next(iterator)) tf.print("external_var의 값:", external_var) iterator = iter([0, 1, 2, 3]) buggy_consume_next(iterator) # 다음은 반복자의 다음 값을 추출하지 않고 첫 번째 값을 재사용합니다. buggy_consume_next(iterator) buggy_consume_next(iterator) """ Explanation: 파이썬 상태 주의하기 제러네이터와 반복자(iterator) 같은 파이썬의 많은 기능은 상태 추적을 위해 파이썬 런타임에 의존합니다. 일반적으로 이런 요소들은 즉시 실행 모드와 같이 동작하지만 트레이싱 동작 때문에 tf.function 안에서는 예상밖의 일이 일어날 수 있습니다. 예를 하나 들면, 다음 반복자 값을 얻는 것이 파이썬 부수 효과이기 때문에 트레이싱 동안에만 일어납니다. End of explanation """ @tf.function def f(x): v = tf.Variable(1.0) v.assign_add(x) return v with assert_raises(ValueError): f(1.0) """ Explanation: 변수 코드가 의도한 순서대로 실행되는 것처럼 tf.function에서 매우 쉽게 변수를 생성하고 사용할 수 있습니다. 하지만 아주 중요한 주의 사항이 있습니다. 변수는 즉시 실행 모드와 그래프 모드에서 다르게 동작하는 코드를 만들 수 있습니다. 특히 호출마다 새로운 변수를 만들 때 일어납니다. 트레이싱 구조 때문에 tf.function은 호출마다같은 변수를 재사용합니다. 하지만 즉시 실행 모드에서는 호출마다 새로운 변수가 생성됩니다. 이런 실수를 방지하기 위해 tf.function은 위험한 변수 생성이 감지되면 에러를 발생합니다. End of explanation """ v = tf.Variable(1.0) @tf.function def f(x): return v.assign_add(x) print(f(1.0)) # 2.0 print(f(2.0)) # 4.0 """ Explanation: 하지만 모호하지 않은 코드는 괜찮습니다. End of explanation """ class C: pass obj = C() obj.v = None @tf.function def g(x): if obj.v is None: obj.v = tf.Variable(1.0) return obj.v.assign_add(x) print(g(1.0)) # 2.0 print(g(2.0)) # 4.0 """ Explanation: 함수가 처음 호출될 때만 변수가 생성되도록 tf.function 안에서 변수를 생성할 수 있습니다. End of explanation """ state = [] @tf.function def fn(x): if not state: state.append(tf.Variable(2.0 * x)) state.append(tf.Variable(state[0] * 3.0)) return state[0] * x * state[1] print(fn(tf.constant(1.0))) print(fn(tf.constant(3.0))) """ Explanation: 변수 초기화가 함수 매개변수와 다른 변수 값에 의존할 수 있습니다. 올바른 초기화 순서를 찾기 위해 제어 의존성을 생성하는 메서드를 사용할 수 있습니다. End of explanation """ # 간단한 루프 @tf.function def f(x): while tf.reduce_sum(x) > 1: tf.print(x) x = tf.tanh(x) return x f(tf.random.uniform([5])) """ Explanation: 오토그래프 변환 오토그래프(AutoGraph)는 tf.function안에 기본으로 활성화되어 있습니다. 파이썬의 즉시 실행 코드를 그래프 호환 텐서플로 연산으로 변환합니다. 여기에는 if, for, while 같은 제어 흐름이 포함됩니다. tf.cond와 tf.while_loop 같은 텐서플로 연산을 여전히 사용할 수 있지만 파이썬으로 제어 흐름을 작성하는 것이 만들기도 이해하기도 쉽습니다. End of explanation """ print(tf.autograph.to_code(f.python_function)) """ Explanation: 관심있다면 오토그래프가 생성한 코드를 확인해 볼 수 있습니다. 
End of explanation """ @tf.function def fizzbuzz(n): for i in tf.range(1, n + 1): print('루프 트레이싱') if i % 15 == 0: print('fizzbuzz 브랜치 트레이싱') tf.print('fizzbuzz') elif i % 3 == 0: print('fizz 브랜치 트레이싱') tf.print('fizz') elif i % 5 == 0: print('buzz 브랜치 트레이싱') tf.print('buzz') else: print('디폴트 브랜치 트레이싱') tf.print(i) fizzbuzz(tf.constant(5)) fizzbuzz(tf.constant(20)) """ Explanation: 조건문 오토그래프는 if &lt;condition&gt; 문장을 이와 대등한 tf.cond 호출로 변경합니다. 이런 대체는 &lt;condition&gt;이 텐서일 때 수행됩니다. 그렇지 않다면 if 문장은 파이썬 조건문으로 실행됩니다. 트레이싱하는 동안 파이썬 조건문을 실행하기 때문에 정확히 하나의 조건 분기만 그래프에 추가됩니다. 오토그래프가 없다면 이렇게 트레이싱된 그래프는 데이터에 따라 제어 흐름을 바꿀 수 없습니다. tf.cond는 조건 분기를 트레이싱하고 그래프에 추가하여 실행시 동적으로 분기를 선택합니다. 트레이싱때문에 의도치 않은 부수 효과가 발생될 수 있습니다. 더 자세한 내용은 오토그래프 트레이싱 효과를 참고하세요. End of explanation """ def measure_graph_size(f, *args): g = f.get_concrete_function(*args).graph print("{}({})는 그래프에 {}개의 노드를 포함합니다".format( f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node))) @tf.function def train(dataset): loss = tf.constant(0) for x, y in dataset: loss += tf.abs(y - x) # 의미없는 연산 return loss small_data = [(1, 1)] * 3 big_data = [(1, 1)] * 10 measure_graph_size(train, small_data) measure_graph_size(train, big_data) measure_graph_size(train, tf.data.Dataset.from_generator( lambda: small_data, (tf.int32, tf.int32))) measure_graph_size(train, tf.data.Dataset.from_generator( lambda: big_data, (tf.int32, tf.int32))) """ Explanation: 오토그래프가 변환한 if 문장에 대한 추가 제약 사항에 대해서는 레퍼런스 문서를 참고하세요. 반복문 오토그래프는 일부 for와 while 문장을 tf.while_loop와 같은 동등한 텐서플로 반복 연산으로 바꿉니다. 변환되지 않으면 파이썬 반복문으로 for와 while 반복문이 실행됩니다. 이런 대체는 다음과 같은 경우에 일어납니다: for x in y: y가 텐서이면 tf.while_loop로 변환됩니다. 특별히 y가 tf.data.Dataset인 경우에는 tf.data.Dataset 연산의 조합이 생성됩니다. while &lt;condition&gt;: &lt;condition&gt;이 텐서라면 tf.while_loop로 변환됩니다. 파이썬 반복문이 트레이싱 동안 실행되므로 매 반복마다 tf.Graph에 추가적인 연산이 포함됩니다. 텐서플로는 반복문 블럭을 트레이싱하여 실행시 얼마나 많은 반복이 수행될지 동적으로 선택합니다. 반복문 블럭은 생성된 tf.Graph에 한 번만 포함됩니다. 오토그래프가 변환한 for와 while 문장에 대한 추가 제약 사항에 대해서는 레퍼런스 문서를 참고하세요. 파이썬 데이터로 반복하기 흔히 저지르기 쉬운 실수는 tf.function 안에서 파이썬이나 넘파이 데이터로 반복하는 것입니다. 트레이싱 과정 동안 반복이 수행되기 때문에 반복마다 tf.Graph에 복사된 모델이 추가될 것입니다. tf.function으로 전체 훈련 반복을 감싸고 싶다면 안전한 방법은 데이터를 tf.data.Dataset으로 감싸서 오토그래프가 동적으로 훈련 반복을 펼치게 하는 것입니다. End of explanation """ batch_size = 2 seq_len = 3 feature_size = 4 def rnn_step(inp, state): return inp + state @tf.function def dynamic_rnn(rnn_step, input_data, initial_state): # [batch, time, features] -> [time, batch, features] input_data = tf.transpose(input_data, [1, 0, 2]) max_seq_len = input_data.shape[0] states = tf.TensorArray(tf.float32, size=max_seq_len) state = initial_state for i in tf.range(max_seq_len): state = rnn_step(input_data[i], state) states = states.write(i, state) return tf.transpose(states.stack(), [1, 0, 2]) dynamic_rnn(rnn_step, tf.random.uniform([batch_size, seq_len, feature_size]), tf.zeros([batch_size, feature_size])) """ Explanation: 데이터셋으로 파이썬/넘파이 데이터를 감쌀 때 tf.data.Dataset.from_generator와 tf.data.Dataset.from_tensors의 차이를 주의하세요. 전자는 파이썬에서 데이터를 유지하고 tf.py_function으로 데이터를 가져오므로 성능에 영향을 미칠 수 있습니다. 후자는 그래프에 있는 하나의 큰 tf.constant() 노드로 데이터를 복사하므로 메모리에 영향을 미칠 수 있습니다. TFRecordDataset, CsvDataset 등으로 파일에서 데이터를 읽는 것이 가장 효율적으로 데이터를 소비하는 방법입니다. 텐서플로는 파이썬을 거치지 않고 비동기적으로 데이터를 적재하고 프리페칭할 수 있기 때문입니다. 조금 더 자세한 정보는 tf.data guide를 참고하세요. 반복하면서 값을 누적하기 반복하면서 중간 값을 누적하는 패턴은 자주 있습니다. 보통 파이썬 리스트나 딕셔너리에 원소를 추가하는 방식을 사용합니다. 하지만 파이썬 부수 효과 때문에 동적으로 펼쳐지는 반복에서는 기대대로 동작하지 않습니다. 대신 tf.TensorArray를 사용해 동적으로 펼쳐지는 반복에서 결과를 누적하세요. End of explanation """
josh-gree/maths-with-python
05-classes-oop.ipynb
mit
p_normal = (12, -14, 0, 2) """ Explanation: Classes and Object Oriented Programming We have looked at functions which take input and return output (or do things to the input). However, sometimes it is useful to think about objects first rather than the actions applied to them. Think about a polynomial, such as the cubic \begin{equation} p(x) = 12 - 14 x + 2 x^3. \end{equation} This is one of the standard forms that we would expect to see for a polynomial. We could imagine representing this in Python using a container containing the coefficients, such as: End of explanation """ p_roots = (1, 2, -3) """ Explanation: The order of the polynomial is given by the number of coefficients (minus one), which is given by len(p_normal)-1. However, there are many other ways it could be written, which are useful in different contexts. For example, we are often interested in the roots of the polynomial, so would want to express it in the form \begin{equation} p(x) = 2 (x - 1)(x - 2)(x + 3). \end{equation} This allows us to read off the roots directly. We could imagine representing this in Python using a container containing the roots, such as: End of explanation """ p_leading_term = 2 """ Explanation: combined with a single variable containing the leading term, End of explanation """ class Polynomial(object): explanation = "I am a polynomial" def explain(self): print(self.explanation) """ Explanation: We see that the order of the polynomial is given by the number of roots (and hence by len(p_roots)). This form represents the same polynomial but requires two pieces of information (the roots and the leading coefficient). The different forms are useful for different things. For example, if we want to add two polynomials the standard form makes it straightforward, but the factored form does not. Conversely, multiplying polynomials in the factored form is easy, whilst in the standard form it is not. But the key point is that the object - the polynomial - is the same: the representation may appear different, but it's the object itself that we really care about. So we want to represent the object in code, and work with that object. Classes Python, and other languages that include object oriented concepts (which is most modern languages) allow you to define and manipulate your own objects. Here we will define a polynomial object step by step. End of explanation """ p = Polynomial() print(p.explanation) p.explain() p.explanation = "I change the string" p.explain() """ Explanation: We have defined a class, which is a single object that will represent a polynomial. We use the keyword class in the same way that we use the keyword def when defining a function. The definition line ends with a colon, and all the code defining the object is indented by four spaces. The name of the object - the general class, or type, of the thing that we're defining - is Polynomial. The convention is that class names start with capital letters, but this convention is frequently ignored. The type of object that we are building on appears in brackets after the name of the object. The most basic thing, which is used most often, is the object type as here. Class variables are defined in the usual way, but are only visible inside the class. Variables that are set outside of functions, such as explanation above, will be common to all class variables. Functions are defined inside classes in the usual way (using the def keyword, indented by four additional spaces). 
They work in a special way: they are not called directly, but only when you have a member of the class. This is what the self keyword does: it takes the specific instance of the class and uses its data. Class functions are often called methods. Let's see how this works on a specific example: End of explanation """ p = Polynomial() p.explanation = "Changed the string again" q = Polynomial() p.explanation = "Changed the string a third time" p.explain() q.explain() """ Explanation: The first line, p = Polynomial(), creates an instance of the class. That is, it creates a specific Polynomial. It is assigned to the variable named p. We can access class variables using the "dot" notation, so the string can be printed via p.explanation. The method that prints the class variable also uses the "dot" notation, hence p.explain(). The self variable in the definition of the function is the instance itself, p. This is passed through automatically thanks to the dot notation. Note that we can change class variables in specific instances in the usual way (p.explanation = ... above). This only changes the variable for that instance. To check that, let us define two polynomials: End of explanation """ class Polynomial(object): explanation = "I am a polynomial" def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) """ Explanation: We can of course make the methods take additional variables. We modify the class (note that we have to completely re-define it each time): End of explanation """ r = Polynomial() r.explain_to("Alice") """ Explanation: We then use this, remembering that the self variable is passed through automatically: End of explanation """ class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) """ Explanation: At the moment the class is not doing anything interesting. To do something interesting we need to store (and manipulate) relevant variables. The first thing to do is to add those variables when the instance is actually created. We do this by adding a special function (method) which changes how the variables of type Polynomial are created: End of explanation """ p = Polynomial(p_roots, p_leading_term) p.explain_to("Alice") q = Polynomial((1,1,0,-2), -1) q.explain_to("Bob") """ Explanation: This __init__ function is called when a variable is created. There are a number of special class functions, each of which has two underscores before and after the name. This is another Python convention that is effectively a rule: functions surrounded by two underscores have special effects, and will be called by other Python functions internally. So now we can create a variable that represents a specific polynomial by storing its roots and the leading term: End of explanation """ class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def __repr__(self): string = str(self.leading_term) for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string def explain_to(self, caller): print("Hello, {}. 
{}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) p = Polynomial(p_roots, p_leading_term) print(p) q = Polynomial((1,1,0,-2), -1) print(q) """ Explanation: Another special function that is very useful is __repr__. This gives a representation of the class. In essence, if you ask Python to print a variable, it will print the string returned by the __repr__ function. We can use this to create a simple string representation of the polynomial: End of explanation """ class Polynomial(object): """Representing a polynomial.""" explanation = "I am a polynomial" def __init__(self, roots, leading_term): self.roots = roots self.leading_term = leading_term self.order = len(roots) def __repr__(self): string = str(self.leading_term) for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string def __mul__(self, other): roots = self.roots + other.roots leading_term = self.leading_term * other.leading_term return Polynomial(roots, leading_term) def explain_to(self, caller): print("Hello, {}. {}.".format(caller,self.explanation)) print("My roots are {}.".format(self.roots)) p = Polynomial(p_roots, p_leading_term) q = Polynomial((1,1,0,-2), -1) r = p*q print(r) """ Explanation: The final special function we'll look at (although there are many more, many of which may be useful) is __mul__. This allows Python to multiply two variables together. With this we can take the product of two polynomials: End of explanation """ class Monomial(Polynomial): """Representing a monomial, which is a polynomial with leading term 1.""" def __init__(self, roots): self.roots = roots self.leading_term = 1 self.order = len(roots) """ Explanation: We now have a simple class that can represent polynomials and multiply them together, whilst printing out a simple string form representing itself. This can obviously be extended to be much more useful. Inheritance As we can see above, building a complete class from scratch can be lengthy and tedious. If there is another class that does much of what we want, we can build on top of that. This is the idea behind inheritance. In the case of the Polynomial we declared that it started from the object class in the first line defining the class: class Polynomial(object). But we can build on any class, by replacing object with something else. Here we will build on the Polynomial class that we've started with. A monomial is a polynomial whose leading term is simply 1. A monomial is a polynomial, and could be represented as such. However, we could build a class that knows that the leading term is always 1: there may be cases where we can take advantage of this additional simplicity. 
We build a new monomial class as follows: End of explanation """ m = Monomial((-1, 4, 9)) m.explain_to("Caroline") print(m) """ Explanation: Variables of the Monomial class are also variables of the Polynomial class, so can use all the methods and functions from the Polynomial class automatically: End of explanation """ class Monomial(Polynomial): """Representing a monomial, which is a polynomial with leading term 1.""" explanation = "I am a monomial" def __init__(self, roots): self.roots = roots self.leading_term = 1 self.order = len(roots) def __repr__(self): string = "" for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string m = Monomial((-1, 4, 9)) m.explain_to("Caroline") print(m) """ Explanation: We note that these functions, methods and variables may not be exactly right, as they are given for the general Polynomial class, not by the specific Monomial class. If we redefine these functions and variables inside the Monomial class, they will override those defined in the Polynomial class. We do not have to override all the functions and variables, just the parts we want to change: End of explanation """ s = Polynomial((2, 3), 4) s.explain_to("David") print(s) """ Explanation: This has had no effect on the original Polynomial class and variables, which can be used as before: End of explanation """ t = m*s t.explain_to("Erik") print(t) """ Explanation: And, as Monomial variables are Polynomials, we can multiply them together to get a Polynomial: End of explanation """ class Monomial(Polynomial): """Representing a monomial, which is a polynomial with leading term 1.""" explanation = "I am a monomial" def __init__(self, roots): Polynomial.__init__(self, roots, 1) def __repr__(self): string = "" for root in self.roots: if root == 0: string = string + "x" elif root > 0: string = string + "(x - {})".format(root) else: string = string + "(x + {})".format(-root) return string v = Monomial((2, -3)) v.explain_to("Fred") print(v) """ Explanation: In fact, we can be a bit smarter than this. Note that the __init__ function of the Monomial class is identical to that of the Polynomial class, just with the leading_term set explicitly to 1. Rather than duplicating the code and modifying a single value, we can call the __init__ function of the Polynomial class directly. This is because the Monomial class is built on the Polynomial class, so knows about it. We regenerate the class, but only change the __init__ function: End of explanation """
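A small follow-up on that last step: in Python 3 the explicit Polynomial.__init__(self, roots, 1) call is more often written with super(), which avoids naming the parent class twice and behaves better if the inheritance chain ever changes. A sketch of the same constructor in that style:

```python
class Monomial(Polynomial):
    # Identical in behaviour to the version above, except that the parent
    # initialiser is located automatically instead of being named explicitly.
    def __init__(self, roots):
        super().__init__(roots, 1)

    # the __repr__ method would be kept exactly as in the previous cell
```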
SamLau95/nbinteract
docs/notebooks/recipes/recipes_layout.ipynb
bsd-3-clause
df_interact(videos) # nbi:left options = { 'title': 'Views for Trending Videos', 'xlabel': 'Date Trending', 'ylabel': 'Views', 'animation_duration': 500, 'aspect_ratio': 1.0, } def xs(channel): return videos.loc[videos['channel_title'] == channel].index def ys(xs): return videos.loc[xs, 'views'] nbi.scatter(xs, ys, channel=videos['channel_title'].unique()[9:15], options=options) # nbi:right options={ 'ylabel': 'Proportion per Unit', 'bins': 100, 'aspect_ratio': 1.0, } def values(col): vals = videos[col] return vals[vals < vals.quantile(0.8)] nbi.hist(values, col=widgets.ToggleButtons(options=['views', 'likes', 'dislikes', 'comment_count']), options=options) """ Explanation: Page Layout / Dashboarding nbinteract gives basic page layout functionality using special comments in your code. Include one or more of these markers in a Python comment and nbinteract will add their corresponding CSS classes to the generated cells. | Marker | Description | CSS class added | | --------- | --------- | --------- | | nbi:left | Floats cell to the left | nbinteract-left | | nbi:right | Floats cell to the right | nbinteract-right | | nbi:hide_in | Hides cell input | nbinteract-hide_in | | nbi:hide_out | Hides cell output | nbinteract-hide_out | By default, only the full template will automatically provide styling for these classes. For other templates, nbinteract assumes that the embedding page will use the CSS classes to style the cells. You can use the layout markers to create simple dashboards. In this page, we create a dashboard using a dataset of trending videos on YouTube. We first create a dashboard showing the code used to generate the plots. Further down the page, we replicate the dashboard without showing the code. End of explanation """ # nbi:hide_in df_interact(videos) # nbi:hide_in # nbi:left options = { 'title': 'Views for Trending Videos', 'xlabel': 'Date Trending', 'ylabel': 'Views', 'animation_duration': 500, 'aspect_ratio': 1.0, } def xs(channel): return videos.loc[videos['channel_title'] == channel].index def ys(xs): return videos.loc[xs, 'views'] nbi.scatter(xs, ys, channel=videos['channel_title'].unique()[9:15], options=options) # nbi:hide_in # nbi:right options={ 'ylabel': 'Proportion per Unit', 'bins': 100, 'aspect_ratio': 1.0, } def values(col): vals = videos[col] return vals[vals < vals.quantile(0.8)] nbi.hist(values, col=widgets.ToggleButtons(options=['views', 'likes', 'dislikes', 'comment_count']), options=options) """ Explanation: Dashboard (without showing code) End of explanation """
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session11/Day3/PSFphotometrySolutions.ipynb
mit
import numpy as np from astropy.io import fits import matplotlib.pyplot as plt import astropy.convolution import pandas as pd f = fits.open("calexp-0527247_10.fits") image = f[1].data """ Explanation: PSF Photometry Version 0.1 We're going to try to piece together the different elements of a PSF photometry pipeline from scratch. Getting that done in one notebook means we'll have to cut some corners, but the process should be illustrative. We will start with an image that has already been processed by the LSST pipelines, so all the calibration steps like bias subtraction, flat fielding, background subtraction, etc (together often called "instrumental signature removal") have been performed, and the image is ready for measurement. Please download the calibrated image. By C Slater (University of Washington) End of explanation """ # Answer plt.imshow(image[:300,:300], vmin=-10, vmax=70, extent=(0, 300, 0, 300)) """ Explanation: 0) Finding an example star I think a good way to work on a problem like this is to start with the core of the algorithm, working on just a single test case. After we have that working and tested, we can build out the infrastructure around it to run on the entire image. Let's display a small subset of the image, say 400x400 pixels. By default, imshow() will scale the colorbar to the minimum and maximum pixel values, so let's also set some more reasonable limits so we can see some stars. We also need to use the extent= keyword argument to imshow() so that the labels on the X and Y axes correspond to the pixel coordinates that we've selected. You can also open the images in ds9 if you like, for easier browsing. End of explanation """ # Answer plt.imshow(image[:50, 150:200], vmin=-10, vmax=70, extent=(150, 200, 50, 0)) """ Explanation: Now let's select a smaller region around something that looks like a good, isolated star. Remember to update the extent so we know which pixels we're looking at. End of explanation """ cutout = image[15:35, 155:185] plt.imshow(cutout) """ Explanation: Ok, we need to cut down the image one more time, this time to give us a "cutout" image of a single star-like object. The cutout should only be about 20x20 pixels. End of explanation """ xx, yy = np.meshgrid(range(2, 10), range(20, 30)) print("xx: ", xx) print("yy: ", yy) """ Explanation: 1) Centroiding Now that we have a test case to work on, let's find its position on the CCD. To do that, we're going to need two arrays: one which has the same shape as cutout, but where each value is the X coordinate of the pixel, and another where each value is the Y coordinate of the pixel. Numpy has a function called meshgrid() that will give us this; we just need to supply an iterator for the X values, and an iterator for the Y values. It looks like this: End of explanation """ # Answer xx, yy = np.meshgrid(np.arange(155, 185), np.arange(15, 35)) """ Explanation: Note how the values in a column are the same in xx, and all the values in a row are the same in yy. Let's make an xx and yy with the values corresponding to the pixel coordinates in your cutout image. End of explanation """ # Answer x_center = np.sum(cutout*xx)/np.sum(cutout) y_center = np.sum(cutout*yy)/np.sum(cutout) print(x_center, y_center) """ Explanation: Now we're ready to compute the centroid. Let's compute it first in x: we want the weighted mean of xx, with our cutout image as the weights. Remember to normalize by the sum of cutout values. The same formula will apply for y. 
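Written out, this is just the flux-weighted mean of the pixel coordinates,
$$ x_c = \frac{\sum_i \hat{f}_i\, x_i}{\sum_i \hat{f}_i}, \qquad y_c = \frac{\sum_i \hat{f}_i\, y_i}{\sum_i \hat{f}_i}, $$
where the sums run over the pixels in the cutout and $\hat{f}_i$ are the pixel values.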
End of explanation """ # Answer plt.imshow(image[:50, 150:200], vmin=-10, vmax=70, extent=(150, 200, 50, 0)) plt.axvline(x_center, color='r') plt.axhline(y_center, color='r') """ Explanation: Do the values you got make sense? Are they within the range of x and y coordinate values of the cutout? Does it roughly match where the star is? If not, are they possibly swapped, x-for-y and y-for-x? (It's very easy to get confused with the ordering of x and y indicies in Numpy, I make that mistake all the time). If they make sense, try overplotting the coordinates on one of your larger cutout images. End of explanation """ # Answer def centroid(image_cutout, x_start, y_start): x_size, y_size = image_cutout.shape xx, yy = np.meshgrid(np.arange(start_x, x_start + x_size), np.arange(start_y, y_start + y_size)) x_center = np.sum(image_cutout*xx)/np.sum(image_cutout) y_center = np.sum(image_cutout*yy)/np.sum(image_cutout) return (x_center, y_center) """ Explanation: If your lines cross on your chosen star, great! You've completed the first step of doing photometry, centroiding the object. Let's take the code you prototyped in the notebook cells, and wrap it into a nice function we can use later. When we call this function, we need to tell it about the coordinates of the image we're providing, so we'll add the x_start and y_start parameters to convey that. We don't need to know the other two corners, because we can figure that out from the size of image_cutout. End of explanation """ def gaussian2D(radius, mu): return 1/(mu**2*2*np.pi)*np.exp(-0.5*((radius)/mu)**2) """ Explanation: 2) PSF Photometry We needed the centroid first, because we're going to use that position to place our "PSF" model. Since we have not yet fit a real PSF model to the sources in the image, we'll use a Gaussian as an approximation. I'll give you the function for a normalized 2D Gaussian: End of explanation """ xx, yy = np.meshgrid(np.arange(155, 185), np.arange(15, 35)) x_center, y_center = (164.08, 25.24) radius = np.sqrt((xx - x_center)**2 + (yy - y_center)**2) psf_size_pixels = 2.5 psf_image = gaussian2D(radius, psf_size_pixels) plt.imshow(psf_image) """ Explanation: First just make an image of an example PSF, on the same grid as the cutout. Note that the Gaussian is parameterized in terms of a radius, which means you will need to compute that radius from the position of every pixel in your image. meshgrid is again the tool for this. You can either use your centroid() function here, or for debugging it's fine to manually set x_center and y_center to specific values. End of explanation """ # Answer np.sum(psf_image) """ Explanation: Just to be sure, we should check that the PSF image is normalized (approximately) by summing the pixel values. End of explanation """ # Answer psf_flux = np.sum(psf_image * subset)/np.sum(psf_image**2) print(psf_flux) """ Explanation: Ok, now we can compute the actual PSF flux. Remember the formula from the lecture is: $$ f_{\rm ML}(x, y) = \frac{\sum_i \hat{f}_i p_i(x,y)}{\sum_i p_i^2(x, y)}$$ where $\hat{f_i}$ are your image values, and $p_i$ are are your PSF model values. End of explanation """ # Answer aperture_flux = np.sum(subset) print(aperture_flux) """ Explanation: Double check that the PSF flux you get matches (approximately) the flux you get from aperture photometry. If your cutout image is small enough that there are no other sources in it, you can just sum the cutout itself. No need to apply a more restrictive aperture for a debugging check like this. 
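One way to make that comparison concrete is a two-line sketch using the psf_flux and aperture_flux values computed above (here the subset array used in those cells is taken to be the star cutout):

```python
# The two estimates should agree to within a few percent for a bright, isolated
# star; a large fractional difference usually points to a bad centroid or an
# inappropriate PSF width.
print("fractional difference:", (psf_flux - aperture_flux) / aperture_flux)
```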
End of explanation """ # Answer # We need to pass both the centroid x and y, and the image cutout start x,y because the star # isn't necessarily at the very center of the cutout. def psf_flux_gaussian(image_cutout, centroid_x, centroid_y, radius, x_start, y_start): x_size, y_size = image_cutout.shape xx, yy = np.meshgrid(np.arange(start_x, x_start + x_size), np.arange(start_y, y_start + y_size)) r = np.sqrt((xx - centroid_x)**2 + (yy - centroid_y)**2) psf_image = gaussian2D(r, radius) psf_flux = np.sum(psf_image * cutout)/np.sum(psf_image**2) return psf_flux """ Explanation: If your psf_flux reasonably matches your aperture_flux, well done! You have a working PSF photometry measurement, now it just needs to get wrapped up in a convenient function for later use. End of explanation """ # Answer def find_peaks(image, threshold): # We are going to append the peaks we find to these two lists peak_x_values = [] peak_y_values = [] edgewidth = 10 for i in range(edgewidth, image.shape[0] - edgewidth): for j in range(edgewidth, image.shape[1] - edgewidth): pixel = image[i,j] if(pixel < threshold): continue if(pixel > image[i+1, j] and pixel > image[i-1, j] and pixel > image[i, j+1] and pixel > image[i, j-1] and pixel > image[i-1, j-1] and pixel > image[i+1, j-1] and pixel > image[i-1, j+1] and pixel > image[i+1, j+1]): peak_x_values.append(i) peak_y_values.append(j) # Now that we're done appending to them, it will be easier if we turn the # lists into numpy arrays. return np.array(peak_x_values), np.array(peak_y_values) """ Explanation: 3) Object Detection Now that we have the core of the algorithm, we need to improve on our earlier step where we hand-picked a single source to measure. We know from the talk on object detection that we need to convolve the image with the PSF to detect sources. Of course, we don't yet know what the PSF is, so we'll guess and use a Gaussian again. With the convolved image, we now need to find "peaks". That is, we want to find pixels whose value is greater than all of their immediate neighbors. That's a relatively easy way to make sure we (mostly) only try to run photometry once on each star. We are also applying a threshold; if a pixel value is below this threshold, we don't bother checking if it's a peak. That's useful to exclude faint background fluctuations that aren't statistically significant (below 5-sigma), or we might set the threshold higher if if we want only bright stars for PSF determination. The edges of the sensor often contain various artifacts, so you might want to exclude 5 to 10 pixels around each edge from the search. Programming note: we're going to do a python loop over all the pixels in the image. This is a really slow way to do this, and you should try to avoid loops like this as much as possible in python. We're doing it this way only because 1) it's illustrative and 2) it takes less than a minute; acceptable for a notebook, but not how we process LSST. End of explanation """ %%time # Answer convolved_image = astropy.convolution.convolve(image, astropy.convolution.Gaussian2DKernel(2.6)) peak_x_values, peak_y_values = find_peaks(convolved_image, 100) """ Explanation: To use the peak-finder, we need to create a "detection image" by convolving the real image with the PSF. Of course, we don't know the PSF yet, so you can substitute a guess: try a Gaussian kernel, with a 2.5 pixel width. The %%time "magic" will show us how long the convolution and peak-finding took. 
End of explanation """ # Answer plt.plot(peak_x_values, peak_y_values, '.') """ Explanation: Let's plot the positions of the peaks on the image, to make sure they look reasonable End of explanation """ # answer n = 50 peak_x = peak_x_values[n] peak_y = peak_y_values[n] cutout = image[(peak_x - 5):(peak_x + 5), (peak_y - 5):(peak_y + 5)] plt.imshow(cutout) """ Explanation: A good debugging check is to look at a few cutouts centered on your newly-found detections. You can flip through a few of these by changing the value of n. End of explanation """ # Answer def run_photometry(image, threshold, psf_width): convolved_image = convolve(image, Gaussian2DKernel(2.6)) peak_xs, peak_ys = find_peaks(convolved_image, threshold) moments_x = [] moments_y = [] centroids_x = [] centroids_y = [] fluxes = [] for peak_x, peak_y in zip(peak_xs, peak_ys): image_cutout = image[(peak_x - 5):(peak_x + 5), (peak_y - 5):(peak_y + 5)] start_x = int(peak_x - 5) start_y = int(peak_y - 5) centroid_x, centroid_y = centroid(image_cutout, peak_x, peak_y) centroids_x.append(centroid_x) centroids_y.append(centroid_y) moment_x, moment_y = second_moment(image_cutout, peak_x, peak_y, start_x, start_y) moments_x.append(moment_x) moments_y.append(moment_y) psf_flux = psf_flux_gaussian(image_cutout, centroid_x, centroid_y, psf_width, start_x, start_y) fluxes.append(psf_flux) return pd.DataFrame({"centroid_x": centroids_x, "centroid_y": centroids_y, "moment_x": moments_x, "moment_y": moments_y, "gaussian_flux": fluxes}) """ Explanation: 4) Photometry on all objects You're almost finished, the only remaining task is to put together all the different pieces from above into one function that finds sources and measures their sizes and fluxes, and outputs a data table at the end. For the moment, I will tell you that the Gaussian PSF size is 2 pixels. If you have more time, there's an "extra credit" problem at the end of the notebook that will show you how to measure the PSF size directly, which also lets you measure object sizes in general. But try to get the PSF photometry working first before going onto that. End of explanation """ %%time # Answer photometry_table = run_photometry(image, 50, median_moment) print(photometry_table[:20]) print(photometry_table[:20]) """ Explanation: With that function all filled in, let's run it on the image! End of explanation """ # answer def second_moment(image_cutout, centroid_x, centroid_y, start_x, start_y): x_size, y_size = image_cutout.shape xx, yy = np.meshgrid(np.arange(start_x, start_x + x_size), np.arange(start_y, start_y + y_size)) x_width = np.sqrt(np.sum((image_cutout*(xx - centroid_x))**2))/np.sqrt(np.sum(image_cutout**2)) y_width = np.sqrt(np.sum((image_cutout*(yy - centroid_y))**2))/np.sqrt(np.sum(image_cutout**2)) return (x_width, y_width) """ Explanation: Did you get a table full of photometry? If so, great! If it's not working well, it's likely to be a problem with getting the right inputs to the different functions you're calling. You've tested all the steps separately, so they should be working. Getting the right indices on your image cutout is always a tricky part. If you have extra time, try adding an aperture photometry function to the processing. You can plot the size (from the second moment) against flux to find what objects might be galaxies, and generate the cutout image to see if they're really galaxies. 
Extra Credit: Measuring the PSF Once we have sources identified in an image, we want to identify which would be good for PSF determination, and then we want to measure their PSFs. In our case we're going to do both of these at once, we're going to measure sizes for all sources, and then use the mean size of those which we think are stars as our PSF model. In a more sophisticated pipeline, the object sizes might be used as a cut before passing to some more complicated PSF determination process. To obtain object sizes, we're going to measure the "second moment". This will look a lot like the centroid algorithm. The formula we want to implement is: $$I_{xx}^2 = \frac{\sum_i (\hat{f_i} (x_i - x_{\rm center}))^2}{\sum_i \hat{f_i}^2} $$ Let's try building it directly in the function this time; if it gives you trouble, feel free to try it out in some notebook cells directly (so you can see the intermediate variables better) before putting it back in the function. End of explanation """ # Answer second_moment(cutout, 5.0, 5.0, 0, 0) """ Explanation: Let's run the second moment estimator on one of the cutouts you made above. End of explanation """ # Answer %%time # We will put the x and y moments in these lists moments_x = [] moments_y = [] for peak_x, peak_y in # complete image_cutout = image[(peak_x - 5):(peak_x + 5), (peak_y - 5):(peak_y + 5)] start_x = int(peak_x - 5) start_y = int(peak_y - 5) moment_x, moment_y = second_moment(image_cutout, peak_x, peak_y, start_x, start_y) moments_x.append(moment_x) moments_y.append(moment_y) """ Explanation: Do the results look reasonable, compared to the image of the cutout you made above? Note that this is the Gaussian width, not the full-width at half-max that is typically quoted for PSF sizes. If those look good, now we just need to run the second moment estimator over all the sources in your catalog. Our goal is to find if there's one particular size that fits lots of objects; that's likely to be our PSF size and the objects are likely to be stars. End of explanation """ # Answer moments_sq = np.sqrt(np.array(moments_x)**2 + np.array(moments_y)**2) plt.hist(moments_sq, bins=40) plt.xlabel("Second Moment (pixels)") """ Explanation: Because we have second moments in both X and Y directions, we should combine them into a single value as the square root of the sum of squares. End of explanation """
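# --- Appended illustrative sketch (not part of the original answer cells) ---
# The helper functions sketched above contain a few small name slips
# (``start_x`` vs ``x_start``, ``subset``/``cutout`` vs ``image_cutout``, and the
# undefined ``median_moment`` in the final run_photometry call).  Below is a
# minimal, self-contained version of the same centroid + Gaussian-PSF-flux idea,
# run on a synthetic star so it needs nothing but numpy.  The star position,
# width and flux used here are made-up illustration values, not measurements
# from the real calexp image.
import numpy as np

def gaussian2d_sketch(radius, sigma):
    # Normalized circular 2D Gaussian evaluated at the given radii.
    return 1.0 / (2.0 * np.pi * sigma**2) * np.exp(-0.5 * (radius / sigma)**2)

def centroid_sketch(image_cutout, x_start, y_start):
    # First-moment (flux-weighted mean) centroid of a cutout whose first
    # pixel has image coordinates (x_start, y_start).
    ny, nx = image_cutout.shape
    xx, yy = np.meshgrid(np.arange(x_start, x_start + nx),
                         np.arange(y_start, y_start + ny))
    total = np.sum(image_cutout)
    return np.sum(image_cutout * xx) / total, np.sum(image_cutout * yy) / total

def psf_flux_sketch(image_cutout, cen_x, cen_y, sigma, x_start, y_start):
    # Maximum-likelihood PSF flux: sum(f_i * p_i) / sum(p_i^2).
    ny, nx = image_cutout.shape
    xx, yy = np.meshgrid(np.arange(x_start, x_start + nx),
                         np.arange(y_start, y_start + ny))
    r = np.sqrt((xx - cen_x)**2 + (yy - cen_y)**2)
    psf = gaussian2d_sketch(r, sigma)
    return np.sum(psf * image_cutout) / np.sum(psf**2)

# Fake star: total flux 1000, sigma of 2 pixels, centered at (164.3, 25.7),
# on the same pixel grid used for the cutouts above.
xx, yy = np.meshgrid(np.arange(155, 185), np.arange(15, 35))
fake = 1000.0 * gaussian2d_sketch(np.sqrt((xx - 164.3)**2 + (yy - 25.7)**2), 2.0)

cx, cy = centroid_sketch(fake, 155, 15)
flux = psf_flux_sketch(fake, cx, cy, 2.0, 155, 15)
print("recovered centroid:", cx, cy)   # should land close to (164.3, 25.7)
print("recovered flux:", flux)         # should land close to 1000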
satishgoda/learning
python/libs/rxpy/support/A Decision Tree of Observable Operators. Part I - Creation.ipynb
mit
reset_start_time(O.just) stream = O.just({'answer': rand()}) disposable = subs(stream) sleep(0.5) disposable = subs(stream) # same answer # all stream ops work, its a real stream: disposable = subs(stream.map(lambda x: x.get('answer', 0) * 2)) """ Explanation: A Decision Tree of Observable Operators Part 1: NEW Observables. source: http://reactivex.io/documentation/operators.html#tree. (transcribed to RxPY 1.5.7, Py2.7 / 2016-12, Gunther Klessinger, axiros) This tree can help you find the ReactiveX Observable operator you’re looking for. <h2 id="tocheading">Table of Contents</h2> <div id="toc"></div> Usage There are no configured behind the scenes imports or code except startup.py, which defines output helper functions, mainly: rst, reset_start_time: resets a global timer, in order to have use cases starting from 0. subs(observable): subscribes to an observable, printing notifications with time, thread, value All other code is explicitly given in the notebook. Since all initialisiation of tools is in the first cell, you always have to run the first cell after ipython kernel restarts. All other cells are autonmous. In the use case functions, in contrast to the official examples we simply use rand quite often (mapped to randint(0, 100)), to demonstrate when/how often observable sequences are generated and when their result is buffered for various subscribers. When in doubt then run the cell again, you might have been "lucky" and got the same random. RxJS The (bold printed) operator functions are linked to the official documentation and created roughly analogous to the RxJS examples. The rest of the TOC lines links to anchors within the notebooks. Output When the output is not in marble format we display it like so: ``` new subscription on stream 276507289 3.4 M [next] 1.4: {'answer': 42} 3.5 T1 [cmpl] 1.6: fin `` where the lines are syncronouslyprinted as they happen. "M" and "T1" would be thread names ("M" is main thread). For each use case inreset_start_time()(aliasrst`), a global timer is set to 0 and we show the offset to it, in milliseconds & with one decimal value and also the offset to the start of stream subscription. In the example 3.4, 3.5 are millis since global counter reset, while 1.4, 1.6 are offsets to start of subscription. I want to create a NEW Observable... ... that emits a particular item: just End of explanation """ print('There is a little API difference to RxJS, see Remarks:\n') rst(O.start) def f(): log('function called') return rand() stream = O.start(func=f) d = subs(stream) d = subs(stream) header("Exceptions are handled correctly (an observable should never except):") def breaking_f(): return 1 / 0 stream = O.start(func=breaking_f) d = subs(stream) d = subs(stream) # startasync: only in python3 and possibly here(?) http://www.tornadoweb.org/en/stable/concurrent.html#tornado.concurrent.Future #stream = O.start_async(f) #d = subs(stream) """ Explanation: ..that was returned from a function called at subscribe-time: start End of explanation """ rst(O.from_iterable) def f(): log('function called') return rand() # aliases: O.from_, O.from_list # 1.: From a tuple: stream = O.from_iterable((1,2,rand())) d = subs(stream) # d = subs(stream) # same result # 2. from a generator gen = (rand() for j in range(3)) stream = O.from_iterable(gen) d = subs(stream) rst(O.from_callback) # in my words: In the on_next of the subscriber you'll have the original arguments, # potentially objects, e.g. user original http requests. # i.e. 
you could merge those with the result stream of a backend call to # a webservice or db and send the request.response back to the user then. def g(f, a, b): f(a, b) log('called f') stream = O.from_callback(lambda a, b, f: g(f, a, b))('fu', 'bar') d = subs(stream.delay(200)) # d = subs(stream.delay(200)) # does NOT work """ Explanation: ..that was returned from an Action, Callable, Runnable, or something of that sort, called at subscribe-time: from End of explanation """ rst() # start a stream of 0, 1, 2, .. after 200 ms, with a delay of 100 ms: stream = O.timer(200, 100).time_interval()\ .map(lambda x: 'val:%s dt:%s' % (x.value, x.interval))\ .take(3) d = subs(stream, name='observer1') # intermix directly with another one d = subs(stream, name='observer2') """ Explanation: ...after a specified delay: timer End of explanation """ rst(O.repeat) # repeat is over *values*, not function calls. Use generate or create for function calls! subs(O.repeat({'rand': time.time()}, 3)) header('do while:') l = [] def condition(x): l.append(1) return True if len(l) < 2 else False stream = O.just(42).do_while(condition) d = subs(stream) """ Explanation: ...that emits a sequence of items repeatedly: repeat End of explanation """ rx = O.create rst(rx) def f(obs): # this function is called for every observer obs.on_next(rand()) obs.on_next(rand()) obs.on_completed() def cleanup(): log('cleaning up...') return cleanup stream = O.create(f).delay(200) # the delay causes the cleanup called before the subs gets the vals d = subs(stream) d = subs(stream) sleep(0.5) rst(title='Exceptions are handled nicely') l = [] def excepting_f(obs): for i in range(3): l.append(1) obs.on_next('%s %s (observer hash: %s)' % (i, 1. / (3 - len(l)), hash(obs) )) obs.on_completed() stream = O.create(excepting_f) d = subs(stream) d = subs(stream) rst(title='Feature or Bug?') print('(where are the first two values?)') l = [] def excepting_f(obs): for i in range(3): l.append(1) obs.on_next('%s %s (observer hash: %s)' % (i, 1. / (3 - len(l)), hash(obs) )) obs.on_completed() stream = O.create(excepting_f).delay(100) d = subs(stream) d = subs(stream) # I think its an (amazing) feature, preventing to process functions results of later(!) failing functions rx = O.generate rst(rx) """The basic form of generate takes four parameters: the first item to emit a function to test an item to determine whether to emit it (true) or terminate the Observable (false) a function to generate the next item to test and emit based on the value of the previous item a function to transform items before emitting them """ def generator_based_on_previous(x): return x + 1.1 def doubler(x): return 2 * x d = subs(rx(0, lambda x: x < 4, generator_based_on_previous, doubler)) rx = O.generate_with_relative_time rst(rx) stream = rx(1, lambda x: x < 4, lambda x: x + 1, lambda x: x, lambda t: 100) d = subs(stream) """ Explanation: ...from scratch, with custom logic and cleanup (calling a function again and again): create End of explanation """ rst(O.defer) # plural! (unique per subscription) streams = O.defer(lambda: O.just(rand())) d = subs(streams) d = subs(streams) # gets other values - created by subscription! # evaluating a condition at subscription time in order to decide which of two streams to take. 
rst(O.if_then) cond = True def should_run(): return cond streams = O.if_then(should_run, O.return_value(43), O.return_value(56)) d = subs(streams) log('condition will now evaluate falsy:') cond = False streams = O.if_then(should_run, O.return_value(43), O.return_value(rand())) d = subs(streams) d = subs(streams) """ Explanation: ...for each observer that subscribes OR according to a condition at subscription time: defer / if_then End of explanation """ rst(O.range) d = subs(O.range(0, 3)) """ Explanation: ...that emits a sequence of integers: range End of explanation """ rst(O.interval) d = subs(O.interval(100).time_interval()\ .map(lambda x, v: '%(interval)s %(value)s' \ % ItemGetter(x)).take(3)) """ Explanation: ...at particular intervals of time: interval (you can .publish() it to get an easy "hot" observable) End of explanation """ rst(O.empty) d = subs(O.empty()) """ Explanation: ...after a specified delay (see timer) ...that completes without emitting items: empty End of explanation """ rst(O.never) d = subs(O.never()) """ Explanation: ...that does nothing at all: never End of explanation """ rst(O.throw) d = subs(O.throw(ZeroDivisionError)) """ Explanation: ...that excepts: throw End of explanation """
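# --- Appended illustrative sketch (not part of the original notebook) ---
# A tiny, self-contained recap of the cold-observable point made in the
# ``defer`` section above: O.just captures one value when the stream is built,
# while O.defer re-runs the factory for every subscriber.  This assumes RxPY 1.x
# (the version this notebook targets), where ``Observable`` lives in the ``rx``
# package; the plain print-based subscriber stands in for the notebook's
# ``subs()`` helper, which is defined in startup.py and not shown here.
import random
from rx import Observable as O

just_stream = O.just(random.randint(0, 100))                     # value fixed once, here
defer_stream = O.defer(lambda: O.just(random.randint(0, 100)))   # value drawn per subscription

for name, stream in [("just", just_stream), ("defer", defer_stream)]:
    # Subscribe twice to each stream: "just" prints the same number twice,
    # "defer" will generally print two different numbers.
    stream.subscribe(lambda v, n=name: print(n, "->", v))
    stream.subscribe(lambda v, n=name: print(n, "->", v))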
ComputationalModeling/spring-2017-danielak
past-semesters/spring_2016/homework_assignments/Homework_6.ipynb
agpl-3.0
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import math def calc_accel(velocity, use_resistance=True): ''' This function calculates and returns the acceleration vector from a launched t-shirt, given an input velocity. Optionally, you can turn on and off air resistance for comparison purposes with resistance=True or False. Note 1: inputs and outputs are in SI units - meters per second for velocity and meters per second^2 for acceleration. Note 2: both input velocity and returned acceleration are 2-element numpy arrays, with the horizontal (x) direction in the first element and vertical (y) direction in the second element of the array. ''' C = 2.0 # t-shirt rho_air = 1.225 # kg/m^3 rho_tshirt = 500.0 # kg/m^3 mass_obj = 1.0 # kg rad_obj = ((3.0*mass_obj)/(4.0*rho_tshirt*math.pi))**(1.0/3.0) # assume sphere for simplicity; slight approx. # speed = magnitude of velocity vector speed = (velocity[0]**2 + velocity[1]**2)**0.5 if speed > 75.0: print("WARNING! At speeds > 75 m/s, your t-shirt cannon will explode!") accel_vector = np.zeros(2) # if we have air resistance turned on, compute the force due to air # resistance and add it to the acceleration. if use_resistance==True: ''' Calculates acceleration due to air resistance |F_air| = 0.5 * C * rho * A * v^2 C = constant based on shape rho = density of air A = cross-sectional area v = speed (magnitude of velocity) Note that we calculate the magnitude in the first step and turn it into a vector in the second step. ''' accel_air_resist = -0.5 * (C/mass_obj) * rho_air * math.pi * rad_obj**2 * speed**2 # turn this into a vector quantity - velocity[0]/speed gives x-component of velocity unit vector # and velocity[1]/speed gives y-component of velocity unit vector accel_vector[0] += accel_air_resist*velocity[0]/speed accel_vector[1] += accel_air_resist*velocity[1]/speed # we always have gravity, which always points downward. accel_vector[1] -= 9.81 return accel_vector # Put your code here! """ Explanation: Homework 6 - The T-Shirt Cannon The MSU Science Festival has decided that its existing marketing efforts are not effective enough at attracting peoples' attention, and has decided to enter a new era of high-velocity publicity. You have been commissioned to build a t-shirt cannon to distribute Science Festival t-shirts in MSU's large lecture halls to raise public awareness for the festival. Your deliverables for this highly lucrative contract are (1) a fully functioning t-shirt cannon and (2) a program that calculates ranges as a function of the t-shirt cannon's launch speed and launch angle, and some informative graphs so that the t-shirt cannon's operator can precisely target their long-distance PR efforts. You have already constructed your t-shirt cannon; now you need to make predictions about how far you can launch a t-shirt, taking into account the challenges inherent to the nascent, but potentially highly lucrative, field of indoor high-velocity garment distribution. As part of your contract, you have agreed to provide software, tables, and graphs that model the behavior of the t-shirt cannon. Once you do this, you will take your place in t-shirt cannon history, beside such innovators as the San Antonio Coyote (also known as the Tesla of T-Shirt Cannons). In the paragraphs below, we provide you with some background knowledge that you will need for this model as well as detailed instructions for the assignment itself. Happy simulating! And remember... you're securing your place in history with this project. 
Note: While it's fine to talk with others about this homework - and in fact we strongly encourage you to do so - you need to write your own code and turn in your own homework assignment! <img src="https://static01.nyt.com/images/2013/06/23/magazine/23wmt1/mag-23WMT-t_CA0-master675.jpg" width=500> Your name // put your name here! Some background knowledge that we need for this model Position, velocity, and acceleration In this homework assignment, we are going to model a relatively simple system - namely, the launching of a t-shirt. An object's position ($\vec{x}$) and velocity ($\vec{v}$) evolve according to a straightforward set of mathematical rules: $\vec{x} = \vec{x}_0 + \int_0^t \vec{v} dt$ $\vec{v} = \vec{v}_0 + \int_0^t \vec{a} dt$ $\vec{a} = \frac{d\vec{v}}{dt} = \frac{1}{m}\sum \vec{F}$ Note that $\vec{a}$ is the acceleration and m is the mass of the t-shirt. $\vec{x}_0$ and $\vec{v}_0$ are the position and velocity at $t=0$, respectively. The little arrows on top of letters indicate vector quantities - i.e., quantities that have direction as well as magnitude. With vector quantities, you can separate the components and solve them independently - for example, you only need to know the x-velocity to calculate the x-position (though ultimately you may need some quantities that need information from both - you need the speed, which includes x- and y-velocity, to calculate the acceleration due to air resistance). In the last term of the third equation, the sum is over all forces acting on the object (i.e., the t-shirt). The forces acting on the object are gravity, which points downward with a magnitude of $g=9.81$m s$^{-2}$, and air resistance, which can be written as: $\vec{F}_{air} = -\frac{1}{2} C \rho A v^2 \hat{v}$ Where C is a constant corresponding to the shape of the object ($C=0.47$ for a sphere, $C=2.0$ for an Industry Standard T-Shirt, or IST), $\rho$ is the density of the atmosphere, 1.2 kg/m$^3$, A is the cross-sectional area of the object, v is the magnitude of the velocity, and $\hat{v}$ is the unit vector of the velocity. Note the minus sign - this means that the force of air resistance always opposes the motion of the object, regardless of its direction. Section 1 - The Model Your task is to create a code that implements the equations presented above to model the position and velocity of the t-shirt cannon as a function of time. You have been provided a function that calculates the acceleration on the t-shirt due to both air resistance and gravity; you need to write a function that takes in a speed (the magnitude of velocity) and a launch angle (i.e., the angle above horizontal that you are launching the t-shirt) and returns lists of horizontal and vertical positions and velocities, as well as times. Note that you will be graded both on the correctness of your solution and the quality of your code. Use functions when possible, and make sure that your code is clearly written and has comments explaining what everything does. Also make sure that all plots are clearly marked with axis labels and a title! End of explanation """ # Put your code here! """ Explanation: Section 2 - some questions and plots Note: make sure that all plots have appropriate x- and y-limits, as well as figure titles and axis labels. The questions may require both code and a written answer, so please make sure to do both! 
Question 1: Create a plot that shows trajectories (lines showing the x- and y-position of the t-shirt, without showing the time) for a grid of speeds (ranging from 15 to 75 m/s in steps of 15 m/s) and angles above the horizontal (from 15 to 75 degrees in steps of 15 degrees). For one of the values (45 degrees and 45 m/s) also plot the trajectory when you turn off air resistance (with the argument resistance=False in the calc_accel() function). Note that your t-shirt cannon can only launch t-shirts at speeds of up to 75 m/s - at any higher speeds, you risk the cannon exploding or the t-shirt disintegrating in flight! End of explanation """ # Put your code here! """ Explanation: Question 2: Qualitatively, how important is the effect of air resistance in this calculation? Put your answer here! Question 3: When air resistance is included, what combination of angle and speed gives you the longest "hang time"? In other words, if you want the t-shirt to be in the air for as long as possible, what values do you choose? (Note: this doesn't have to be the angles and speeds chosen above, but your cannon can only launch t-shirts at speeds of up to 75 m/s.) End of explanation """ # Put your code here! """ Explanation: Put your answer here! Question 4: When air resistance is included, what combination of angle and speed lets you send a t-shirt the farthest horizontal distance? End of explanation """ # Put your code here! """ Explanation: Put your answer here! Question 5: You are asked to distribute t-shirts in a lecture hall that has 12 meter high ceilings. What is the farthest horizontal distance that you can send a t-shirt in that case, and how long does it take to get there? (Ignore the fact that most lecture halls have stadium-type ceiling; imagine the floor is flat.) End of explanation """
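# --- Appended illustrative sketch (not an official solution) ---
# One possible way, among many, to integrate the equations of motion from
# Section 1 using the provided calc_accel(): a fixed-step Euler update
# x <- x + v*dt, v <- v + a*dt.  The 0.01 s time step, the 60 s safety cap and
# the stop condition (t-shirt back at launch height) are illustrative choices,
# not requirements of the assignment.
import numpy as np

def shoot_tshirt(speed, angle_deg, dt=0.01, use_resistance=True):
    angle = np.radians(angle_deg)
    position = np.zeros(2)                                        # x, y in meters
    velocity = speed * np.array([np.cos(angle), np.sin(angle)])   # m/s
    xs, ys, ts = [0.0], [0.0], [0.0]
    t = 0.0
    while position[1] >= 0.0 and t < 60.0:
        accel = calc_accel(velocity, use_resistance=use_resistance)
        position = position + velocity * dt
        velocity = velocity + accel * dt
        t += dt
        xs.append(position[0])
        ys.append(position[1])
        ts.append(t)
    return xs, ys, ts

# Example: 45 m/s at 45 degrees, with and without air resistance.
x_drag, y_drag, t_drag = shoot_tshirt(45.0, 45.0, use_resistance=True)
x_free, y_free, t_free = shoot_tshirt(45.0, 45.0, use_resistance=False)
print("range with drag:    %.1f m" % x_drag[-1])
print("range without drag: %.1f m" % x_free[-1])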
metpy/MetPy
v0.10/_downloads/0fad3c70b425eaed875fe7cd5ea738b8/Advanced_Sounding.ipynb
bsd-3-clause
import matplotlib.pyplot as plt import numpy as np import pandas as pd import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo, SkewT from metpy.units import units """ Explanation: Advanced Sounding Plot a sounding using MetPy with more advanced features. Beyond just plotting data, this uses calculations from metpy.calc to find the lifted condensation level (LCL) and the profile of a surface-based parcel. The area between the ambient profile and the parcel profile is colored as well. End of explanation """ col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed'] df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False), skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names) df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'], np.deg2rad(df['direction'])) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed', 'u_wind', 'v_wind'), how='all').reset_index(drop=True) """ Explanation: Upper air data can be obtained using the siphon package, but for this example we will use some of MetPy's sample data. End of explanation """ p = df['pressure'].values * units.hPa T = df['temperature'].values * units.degC Td = df['dewpoint'].values * units.degC wind_speed = df['speed'].values * units.knots wind_dir = df['direction'].values * units.degrees u, v = mpcalc.wind_components(wind_speed, wind_dir) """ Explanation: We will pull the data out of the example dataset into individual variables and assign units. End of explanation """ fig = plt.figure(figsize=(9, 9)) add_metpy_logo(fig, 115, 100) skew = SkewT(fig, rotation=45) # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot skew.plot(p, T, 'r') skew.plot(p, Td, 'g') skew.plot_barbs(p, u, v) skew.ax.set_ylim(1000, 100) skew.ax.set_xlim(-40, 60) # Calculate LCL height and plot as black dot lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0]) skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black') # Calculate full parcel profile and add to plot as black line prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC') skew.plot(p, prof, 'k', linewidth=2) # Shade areas of CAPE and CIN skew.shade_cin(p, T, prof) skew.shade_cape(p, T, prof) # An example of a slanted line at constant T -- in this case the 0 # isotherm skew.ax.axvline(0, color='c', linestyle='--', linewidth=2) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() # Show the plot plt.show() """ Explanation: Create a new figure. The dimensions here give a good aspect ratio. End of explanation """
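# --- Appended illustrative sketch (optional addition, not in the original example) ---
# The CAPE and CIN regions shaded above can also be reported as numbers.  This
# assumes that ``mpcalc.cape_cin`` is available in the MetPy version used here
# and that it accepts the sounding arrays (p, T, Td) together with the parcel
# profile ``prof`` computed above; if the API differs in your version, treat
# this purely as a sketch.
cape, cin = mpcalc.cape_cin(p, T, Td, prof)
print('Surface-based CAPE:', cape)
print('Surface-based CIN: ', cin)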
fdmazzone/Ecuaciones_Diferenciales
Teoria_Basica/scripts/EjerciciosGruposLie.ipynb
gpl-2.0
from sympy import *
init_printing()
x,epsilon=symbols('x,epsilon')
y=Function('y')(x)
x1=x*(x+y)/(y+(1+epsilon)*x)
y1=(epsilon*x+y)*(x+y)/(y+(1+epsilon)*x)
exp1=y1.diff(x)/x1.diff(x)
exp2=exp1.subs(y.diff(x),(x**2*sin(x+y)+y)/x/(1-x*sin(x+y)))
x1,y1=symbols('x1,y1')
exp2=exp2.simplify()
exp3=exp2.subs({y:(-epsilon*x1+y1)*(x1+y1)/(y1+(1-epsilon)*x1),x:x1*(x1+y1)/(y1+(1-epsilon)*x1)})
"""
Explanation: Exercise No. 5, First Midterm Make-up Exam, 2015
The following relations
$$\hat{x}=\frac{x(x+y)}{x+y+\epsilon}$$
$$\hat{y}=\frac{(x+y)(\epsilon x+y)}{x+y+\epsilon}$$
define a one-parameter Lie group. Using SymPy, show that $(\hat{x},\hat{y})$ are symmetries of the equation
$$y'=\frac{ x^2\sin(x + y) + y}{x(1-x\sin(x + y))}.$$
Find canonical variables associated with the symmetries. Solve the differential equation.
First, let us check whether the proposed group is a symmetry of the equation.
End of explanation
"""
exp3
"""
Explanation: exp3 holds the result of the change of variables in the equation. Let us see what it contains.
End of explanation
"""
exp4=exp3.simplify()
exp4
"""
Explanation: Truly frightening. Let us see whether it simplifies.
End of explanation
"""
((-epsilon*x1**2 - epsilon*x1*y1 + x1**2 + 2*x1*y1 + y1**2)/(epsilon*x1 - x1 - y1)).simplify()
"""
Explanation: The equation looks similar to the original one, but unfortunately SymPy does not simplify the argument of the sine function. I take those arguments separately and ask it to simplify them.
End of explanation
"""
x1=x*(x+y)/(y+(1+epsilon)*x)
y1=(epsilon*x+y)*(x+y)/(y+(1+epsilon)*x)
xi=x1.diff(epsilon).subs(epsilon,0)
eta=y1.diff(epsilon).subs(epsilon,0)
xi,eta
(eta/xi).simplify()
dsolve(y.diff(x)-eta/xi,y)
"""
Explanation: The argument of the sine function is -x1-y1, which is exactly what I need for the original equation to reappear. Now let us find canonical coordinates.
End of explanation
"""
r=symbols('r')
s=Integral((1/xi).subs(y,r-x),x).doit()
s
s=s.subs(r,x+y)
s
s.expand()
"""
Explanation: We see that r=y+x
End of explanation
"""
r=x+y
s=y/x
exp5=r.diff(x)/s.diff(x)
exp6=exp5.subs(y.diff(x),(x**2*sin(x+y)+y)/x/(1-x*sin(x+y)))
exp6.simplify()
"""
Explanation: The variable s would be 1+y/x. But canonical variables are not unique: we saw that (F(r),G(r)+s) is canonical for any non-vanishing F and any G. In particular, taking F(r)=r and G(r)=-1, we see that we can choose r=x+y and s=y/x as canonical coordinates. Let us carry out the substitution in canonical coordinates.
End of explanation
"""
r2,s2,x2,y2=symbols('r2,s2,x2,y2')
solve([r2-x2-y2,s2-y2/x2],[x2,y2])
exp6.subs({y:r2*s2/(s2+1),x:r2/(s2+1)}).simplify()
"""
Explanation: We see that the result is 1/sin(r). Nevertheless, out of curiosity, let us carry out the substitution as if we had not yet noticed the result. Let us find the inverse change of variables (r,s)->(x,y).
End of explanation
"""
x,y,r,s=symbols('x,y,r,s')
f=Function('f')(x)
dsolve(f.diff(x)-(f**2-x**2)/2/x/f)
Integral(1/(2*x*sqrt(x*(r-x))),x).doit()
"""
Explanation: The resulting equation r'=1/sin(r) is easily solved by hand. We obtain cos(r)=s+C -> cos(x+y)=y/x+C, which is the solution of the original equation.
Exercise 2.2(c), p. 41 of Hydon. We have to find the Lie group whose infinitesimal generator is
$$X=2xy\partial_x+(y^2-x^2)\partial_y$$
The idea is to
1) Find canonical coordinates
2) Use the fact that, in canonical coordinates, the symmetry group satisfies
$$\hat{r}=r\quad\text{and}\quad\hat{s}=s+\epsilon$$
3) Rewrite the relations above in the original variables.
End of explanation
"""
u=symbols('u')
Integral(1/u**2/sqrt(r-u**2),u).doit()
"""
Explanation: SymPy does not know how to do it.
If we make the substitution $x=u^2$ we get
$$\int\frac{dx}{2x\sqrt{x(r-x)}}=\int\frac{du}{u^2\sqrt{r-u^2}}.$$
And this one it does know how to solve.
End of explanation
"""
x,y,xn,yn,epsilon=symbols('x,y,\hat{x},\hat{y},epsilon')
A=solve([(xn**2+yn**2)/xn-(x**2+y**2)/x , -yn/(xn**2+yn**2)+y/(x**2+y**2)-epsilon],[xn,yn])
A
A[0]
A=Matrix(A[0])
A
"""
Explanation: $s=-\frac{1}{r}\sqrt{\frac{r}{u^2}-1}= -\frac{1}{r} \sqrt{ \frac{r-x}{x}}= -\frac{x}{x^2+y^2} \sqrt{ \frac{y^2/x}{x}}=-\frac{y}{x^2+y^2}$
Now we write
$$\hat{r}=r\quad\text{and}\quad\hat{s}=s+\epsilon$$
in terms of $x,y$.
End of explanation
"""
A.diff(epsilon).subs(epsilon,0)
"""
Explanation: Let us check that $\left.\frac{d}{d\epsilon}(\hat{x},\hat{y})\right|_{\epsilon=0}=(2xy,y^2-x^2)$
End of explanation
"""
T=lambda x,y,epsilon: Matrix([ x/(epsilon**2*(x**2+y**2)-2*epsilon*y+1),-(epsilon*x**2+epsilon*y**2-y)/(epsilon**2*(x**2+y**2)-2*epsilon*y+1)])
epsilon_1,epsilon_2=symbols('epsilon_1,epsilon_2')
expr=T(T(x,y,epsilon_1)[0],T(x,y,epsilon_1)[1],epsilon_2)-T(x,y,epsilon_1+epsilon_2)
expr
simplify(expr)
"""
Explanation: Let us check the Lie group property. We define the operator $T$ with a lambda.
End of explanation
"""
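# --- Appended illustrative check (not part of the original exercise) ---
# In the canonical variables r = x + y, s = y/x the first equation reduced to a
# quadrature, dr/ds = 1/sin(r), i.e. ds/dr = sin(r).  As a small cross-check we
# can hand that reduced equation directly to dsolve; this reuses the star
# import of SymPy at the top of this notebook.
r = symbols('r')
s = Function('s')(r)
reduced = dsolve(s.diff(r) - sin(r), s)
print(reduced)   # dsolve returns s(r) = C1 - cos(r); substituting r and s back
                 # gives the implicit solution of the original equation.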
LorenzoBi/courses
UQ/assignment_3/Assignment 3.ipynb
mit
import numpy as np
from scipy.special import binom
import matplotlib.pylab as plt
from scipy.misc import factorial as fact
%matplotlib inline

def binomial(p, n, k):
    return binom(n, k) * p ** k * (1 - p) ** (n-k)
"""
Explanation: Lorenzo Biasi and Michael Aichmüller
End of explanation
"""
p = 1. / 365
1 - np.sum(binomial(p, 23 * (22) / 2, 0))
"""
Explanation: Exercise 1.
a. $\Omega$ will be the set of all possible combinations we can have for 150 objects that each take one of two different values. For example (0, 0, ..., 0), (1, 0, ..., 0), (0, 1, ..., 0), ... (1, 1, ..., 0), ... (1, 1, ..., 1). This sample space has size $2^{150}$. The random variable $X(\omega)$ will be the number of defective objects there are in the sample $\omega$. We can also define $Y(\omega) = 150 - X(\omega)$, which counts the number of checked items.
b. The binomial distribution is the distribution that gives the probability of the number of "successes" in a sequence of random and independent boolean values. This is the case for counting the number of broken objects in a group of 150 with a probability of being broken of 4%.
c. For computing the probability that at most 4 objects are broken we need to sum the probabilities that $k$ objects are broken with $k \in [0, 4]$.
$P(X<5) = \sum_{k=0}^{4} P(X=k) = \sum_{k=0}^{4} {150\choose k}p^k(1-p)^{150-k}$
The probability is 28 %
End of explanation
"""
np.sum(binomial(p, 150, np.arange(5, 10)))

plt.bar(np.arange(20), binomial(p, 150, np.arange(20)))
plt.bar(np.arange(5), binomial(p, 150, np.arange(5)))
plt.bar(np.arange(5, 10), binomial(p, 150, np.arange(5,10)))
plt.xlabel('# defectives')
plt.ylabel('P(X=k)')
"""
Explanation: b. The same as before, except that this time $k \in [5, 9]$. The probability is 64%
End of explanation
"""
def not_same_birthday(q):
    return np.prod((365 - np.arange(q))/ 365)

q = 45
p = np.empty(q - 1)
for i in range(1, q):
    p[i - 1] = 1 - not_same_birthday(i)
plt.plot(np.arange(1, q), p)
plt.plot(23, 1 - not_same_birthday(23), 'r+', label='23 people')
plt.grid()
plt.ylabel('Probability')
plt.xlabel('q')
plt.legend()

1 - not_same_birthday(23)
"""
Explanation: Exercise 2.
For computing how big $q$ needs to be, we can compute the probability $p^*$ that nobody shares a birthday in a group of $q$ people and then compute $1 - p^*$. The first two people will not have the same birthday with probability $364/365$; the probability that the third also has a different birthday is $364/365 * 363 / 365$. This goes on until the last person. One can carry out the computation and find that the minimum group size for having over 50% probability that at least two people share a birthday is 23, with p = 50.73%.
End of explanation
"""
import itertools
x = [1, 2, 3, 4, 5, 6]
omega = set([p for p in itertools.product(x, repeat=3)])
print(r'Omega has', len(omega), 'elements and they are:')
print(omega)
"""
Explanation: Exercise 3.
a. Let's define $\Omega$ as the set of all possible combinations we can obtain with 3 throws of a 6-faced die. $\Omega$ will then be:
End of explanation
"""
g = binomial(1 / 6, 3, np.arange(4)) * np.array([-30, 50, 75, 100])
np.sum(g)
plt.bar(np.arange(4), g)
plt.plot([-.5, 3.5], np.ones(2) * np.sum(g), 'r')
"""
Explanation: X would be -30 when the sample $\omega$ has no 6s, 50 when it has one, 75 when it has two, and 100 when it has three. The probability distribution of such a variable is the binomial with $p = 1 / 6$, $n=3$ and $k$ the number of 6s.
So:
$P_X(X = -30) = {3\choose 0}(1 / 6)^0(1-1/6)^{3-0}$
$P_X(X = 50) = {3\choose 1}(1 / 6)^1(1-1/6)^{3-1}$
$P_X(X = 75) = {3\choose 2}(1 / 6)^2(1-1/6)^{3-2}$
$P_X(X = 100) = {3\choose 3}(1 / 6)^3(1-1/6)^{3-3}$
b. I would take part in this competition; in fact, if we calculate the mean of $X$ as suggested, we obtain $\approx$ 5.67 (€).
End of explanation
"""
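# --- Appended illustrative check (not part of the assignment answers) ---
# A quick Monte-Carlo sanity check of the expected winnings computed in
# Exercise 3b: simulate many rounds of the three-dice game and compare the
# empirical mean payoff with the analytic value of about 5.67.  The sample size
# and seed are arbitrary illustration choices.
import numpy as np

np.random.seed(0)
payoff = np.array([-30, 50, 75, 100])
rolls = np.random.randint(1, 7, size=(200000, 3))    # 200000 games, 3 dice each
sixes = (rolls == 6).sum(axis=1)                     # number of sixes per game
print("simulated mean payoff:", payoff[sixes].mean())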
Upward-Spiral-Science/team1
code/Assignment11_Akash.ipynb
apache-2.0
from mpl_toolkits.mplot3d import axes3d import matplotlib.pyplot as plt %matplotlib inline import numpy as np import urllib2 import scipy.stats as stats np.set_printoptions(precision=3, suppress=True) url = ('https://raw.githubusercontent.com/Upward-Spiral-Science' '/data/master/syn-density/output.csv') data = urllib2.urlopen(url) csv = np.genfromtxt(data, delimiter=",")[1:] # don't want first row (labels) # chopping data based on thresholds on x and y coordinates x_bounds = (409, 3529) y_bounds = (1564, 3124) def check_in_bounds(row, x_bounds, y_bounds): if row[0] < x_bounds[0] or row[0] > x_bounds[1]: return False if row[1] < y_bounds[0] or row[1] > y_bounds[1]: return False if row[3] == 0: return False return True indices_in_bound, = np.where(np.apply_along_axis(check_in_bounds, 1, csv, x_bounds, y_bounds)) data_thresholded = csv[indices_in_bound] n = data_thresholded.shape[0] def synapses_over_unmasked(row): s = (row[4]/row[3])*(64**3) return [row[0], row[1], row[2], s] syn_unmasked = np.apply_along_axis(synapses_over_unmasked, 1, data_thresholded) syn_normalized = syn_unmasked print 'end setup' """ Explanation: Assignment 11: Akash End of explanation """ import sklearn.mixture as mixture n_clusters = 4 gmm = mixture.GMM(n_components=n_clusters, n_iter=1000, covariance_type='diag') labels = gmm.fit_predict(syn_unmasked) clusters = [] for l in range(n_clusters): a = np.where(labels == l) clusters.append(syn_unmasked[a,:]) print len(clusters) print clusters[0].shape """ Explanation: Label Data by It's Associated Cluster Cluster number from previous homeworks where optimal cluster number was 4 (jay helped adapt) End of explanation """ # Regression (x,y,z,syn/unmasked) on cleaned data ################################## # Load regressions from sklearn.linear_model import LinearRegression from sklearn.svm import LinearSVR from sklearn.neighbors import KNeighborsRegressor as KNN from sklearn.ensemble import RandomForestRegressor as RF from sklearn.preprocessing import PolynomialFeatures as PF from sklearn.pipeline import Pipeline from sklearn import cross_validation names = ['Linear Regression','SVR','KNN Regression','Random Forest Regression','Polynomial Regression'] regressions = [LinearRegression(), LinearSVR(C=1.0), KNN(n_neighbors=10, algorithm='auto'), RF(max_depth=5, max_features=1), Pipeline([('poly', PF(degree=2)),('linear', LinearRegression(fit_intercept=False))])] k_fold = 10 counter = 0 for cluster in clusters: s = cluster.shape cluster = cluster.reshape((s[1], s[2])) counter += 1 print print'Regression on cluster: ' + str(counter) X = cluster[:, (0,1,2)] # x,y,z Y = cluster[:,-1] # syn/unmasked from spike for idx2, reg in enumerate(regressions): scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold) print("R^2 of %s: %0.2f (+/- %0.2f)" % (names[idx2], scores.mean(), scores.std() * 2)) print "Regression done" """ Explanation: 1) Run general regressions on data associated with actual cluster Running our previous regressions on actual clusters (before they were split by visual estimates), we find the following: * Cluster 1: No regression fits * * asdf * Cluster 2: Knn is the most promising at R^2: 0.19 (+/- 0.11) * Cluster 3: The following regressions are the most promising: R^2 of KNN Regression: 0.27 (+/- 0.16) R^2 of Random Forest Regression: 0.34 (+/- 0.10) R^2 of Polynomial Regression: 0.22 (+/- 0.17) * Cluster 4: R^2 of Random Forest Regression: 0.14 (+/- 0.07) As we can see, nothing is extrodinary, but Cluster 3 with a RFR is the closest one with the 
original parameters. End of explanation """ counter = 0 for cluster in clusters: s = cluster.shape cluster = cluster.reshape((s[1], s[2])) counter += 1 print print'Regression on cluster: ' + str(counter) X = cluster[:, (0,1,2)] # x,y,z Y = cluster[:,-1] # syn/unmasked from spike for power in range(2,10): a = Pipeline([('poly', PF(degree=power)),('linear', LinearRegression(fit_intercept=False))]) scores = cross_validation.cross_val_score(a, X, Y, scoring='r2', cv=k_fold) print("R^2 of Polynomial Regression of power of %i: %0.2f (+/- %0.2f)" % (power, scores.mean(), scores.std() * 2)) print "Regression on section done" """ Explanation: 2) Change polynomial degree for regression on all data, by cluster End of explanation """ counter = 0 for cluster in clusters: s = cluster.shape cluster = cluster.reshape((s[1], s[2])) counter += 1 print print'Regression on cluster: ' + str(counter) X = cluster[:, (0,1,2)] # x,y,z Y = cluster[:,-1] # syn/unmasked from spike for depth in range(3,12): a = RF(max_depth=5, max_features=1) scores = cross_validation.cross_val_score(a, X, Y, scoring='r2', cv=k_fold) print("R^2 of Random Forrest Regression of Depth %i: %0.2f (+/- %0.2f)" % (depth, scores.mean(), scores.std() * 2)) a = RF(max_features=1) scores = cross_validation.cross_val_score(a, X, Y, scoring='r2', cv=k_fold) print("R^2 of Random Forrest Regression of no set depth: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)) print "Regression on section done" """ Explanation: 3) Change Random Forest Regression Parameters Only on cluster 3 does there seem to be any relation, even if it is weak. Why is it all always the same? End of explanation """ counter = 0 for cluster in clusters: s = cluster.shape cluster = cluster.reshape((s[1], s[2])) counter += 1 print print'Regression on cluster: ' + str(counter) X = cluster[:, (0,1,2)] # x,y,z Y = cluster[:,-1] # syn/unmasked from spike for neighbor in range(5,30): a = KNN(n_neighbors=neighbor, algorithm='auto') scores = cross_validation.cross_val_score(a, X, Y, scoring='r2', cv=k_fold) print("R^2 of KNN w/ %i neighbors: %0.2f (+/- %0.2f)" % (neighbor, scores.mean(), scores.std() * 2)) print "Regression on section done" """ Explanation: 4) Changing known nearest neighbor parameters In cluster 4, the more neighbors there are, the higher the relationship How do i get into cluster 4 to go deeper, if statement logic not working as == 4? End of explanation """ counter = 0 fig, axs = plt.subplots(1,4, figsize=(20,5)) for cluster in clusters: s = cluster.shape cluster = cluster.reshape((s[1], s[2])) counter += 1 print print'Working on cluster: ' + str(counter) X = cluster[:, (0,1,2)] # x,y,z Y = cluster[:,-1] # syn/unmasked from spike figure = plt.figure() axs[counter-1].hist(cluster[:,-1],100) axs[counter-1].set_title('Histogram of Density w/in Cluster#: '+ str(counter)) axs[counter-1].set_xlabel('Density') axs[counter-1].set_ylabel('Frequency') axs[counter-1].set_ylim([0,500]) print "Done with cluster" plt.show() """ Explanation: 5) Density distrubtion in each cluster Interestingly, cluster 3 has many values around zero. End of explanation """
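# --- Appended illustrative sketch (not part of the original analysis) ---
# The cluster count of 4 was carried over from a previous homework.  One common
# way to double-check such a choice is an information criterion.  This sketch
# assumes the GMM class used above exposes .bic(), as scikit-learn's mixture
# models do, and simply reports BIC for a few candidate cluster counts (lower
# is better).
for k in range(2, 7):
    model = mixture.GMM(n_components=k, n_iter=1000, covariance_type='diag')
    model.fit(syn_unmasked)
    print("clusters: %d, BIC: %.1f" % (k, model.bic(syn_unmasked)))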
mne-tools/mne-tools.github.io
0.14/_downloads/plot_artifacts_correction_ica.ipynb
bsd-3-clause
import numpy as np import mne from mne.datasets import sample from mne.preprocessing import ICA from mne.preprocessing import create_eog_epochs, create_ecg_epochs # getting some data ready data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' raw = mne.io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 40, n_jobs=2) # 1Hz high pass is often helpful for fitting ICA picks_meg = mne.pick_types(raw.info, meg=True, eeg=False, eog=False, stim=False, exclude='bads') """ Explanation: Artifact Correction with ICA ICA finds directions in the feature space corresponding to projections with high non-Gaussianity. We thus obtain a decomposition into independent components, and the artifact's contribution is localized in only a small number of components. These components have to be correctly identified and removed. If EOG or ECG recordings are available, they can be used in ICA to automatically select the corresponding artifact components from the decomposition. To do so, you have to first build an Epoch object around blink or heartbeat event. End of explanation """ n_components = 25 # if float, select n_components by explained variance of PCA method = 'fastica' # for comparison with EEGLAB try "extended-infomax" here decim = 3 # we need sufficient statistics, not all time points -> saves time # we will also set state of the random number generator - ICA is a # non-deterministic algorithm, but we want to have the same decomposition # and the same order of components each time this tutorial is run random_state = 23 """ Explanation: Before applying artifact correction please learn about your actual artifacts by reading tut_artifacts_detect. Fit ICA ICA parameters: End of explanation """ ica = ICA(n_components=n_components, method=method, random_state=random_state) print(ica) """ Explanation: Define the ICA object instance End of explanation """ reject = dict(mag=5e-12, grad=4000e-13) ica.fit(raw, picks=picks_meg, decim=decim, reject=reject) print(ica) """ Explanation: we avoid fitting ICA on crazy environmental artifacts that would dominate the variance and decomposition End of explanation """ ica.plot_components() # can you spot some potential bad guys? """ Explanation: Plot ICA components End of explanation """ # first, component 0: ica.plot_properties(raw, picks=0) """ Explanation: Component properties Let's take a closer look at properties of first three independent components. End of explanation """ ica.plot_properties(raw, picks=0, psd_args={'fmax': 35.}) """ Explanation: we can see that the data were filtered so the spectrum plot is not very informative, let's change that: End of explanation """ ica.plot_properties(raw, picks=[1, 2], psd_args={'fmax': 35.}) """ Explanation: we can also take a look at multiple different components at once: End of explanation """ # uncomment the code below to test the inteactive mode of plot_components: # ica.plot_components(picks=range(10), inst=raw) """ Explanation: Instead of opening individual figures with component properties, we can also pass an instance of Raw or Epochs in inst arument to ica.plot_components. This would allow us to open component properties interactively by clicking on individual component topomaps. In the notebook this woks only when running matplotlib in interactive mode (%matplotlib). 
End of explanation """ eog_average = create_eog_epochs(raw, reject=dict(mag=5e-12, grad=4000e-13), picks=picks_meg).average() # We simplify things by setting the maximum number of components to reject n_max_eog = 1 # here we bet on finding the vertical EOG components eog_epochs = create_eog_epochs(raw, reject=reject) # get single EOG trials eog_inds, scores = ica.find_bads_eog(eog_epochs) # find via correlation ica.plot_scores(scores, exclude=eog_inds) # look at r scores of components # we can see that only one component is highly correlated and that this # component got detected by our correlation analysis (red). ica.plot_sources(eog_average, exclude=eog_inds) # look at source time course """ Explanation: Advanced artifact detection Let's use a more efficient way to find artefacts End of explanation """ ica.plot_properties(eog_epochs, picks=eog_inds, psd_args={'fmax': 35.}, image_args={'sigma': 1.}) """ Explanation: We can take a look at the properties of that component, now using the data epoched with respect to EOG events. We will also use a little bit of smoothing along the trials axis in the epochs image: End of explanation """ print(ica.labels_) """ Explanation: That component is showing a prototypical average vertical EOG time course. Pay attention to the labels, a customized read-out of the mne.preprocessing.ICA.labels_: End of explanation """ ica.plot_overlay(eog_average, exclude=eog_inds, show=False) # red -> before, black -> after. Yes! We remove quite a lot! # to definitely register this component as a bad one to be removed # there is the ``ica.exclude`` attribute, a simple Python list ica.exclude.extend(eog_inds) # from now on the ICA will reject this component even if no exclude # parameter is passed, and this information will be stored to disk # on saving # uncomment this for reading and writing # ica.save('my-ica.fif') # ica = read_ica('my-ica.fif') """ Explanation: These labels were used by the plotters and are added automatically by artifact detection functions. You can also manually edit them to annotate components. Now let's see how we would modify our signals if we removed this component from the data End of explanation """ ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5) ecg_inds, scores = ica.find_bads_ecg(ecg_epochs, method='ctps') ica.plot_properties(ecg_epochs, picks=ecg_inds, psd_args={'fmax': 35.}) """ Explanation: Exercise: find and remove ECG artifacts using ICA! End of explanation """ from mne.preprocessing.ica import corrmap # noqa """ Explanation: What if we don't have an EOG channel? We could either: make a bipolar reference from frontal EEG sensors and use as virtual EOG channel. This can be tricky though as you can only hope that the frontal EEG channels only reflect EOG and not brain dynamics in the prefrontal cortex. go for a semi-automated approach, using template matching. In MNE-Python option 2 is easily achievable and it might give better results, so let's have a look at it. 
End of explanation """ # We'll start by simulating a group of subjects or runs from a subject start, stop = [0, len(raw.times) - 1] intervals = np.linspace(start, stop, 4, dtype=int) icas_from_other_data = list() raw.pick_types(meg=True, eeg=False) # take only MEG channels for ii, start in enumerate(intervals): if ii + 1 < len(intervals): stop = intervals[ii + 1] print('fitting ICA from {0} to {1} seconds'.format(start, stop)) this_ica = ICA(n_components=n_components, method=method).fit( raw, start=start, stop=stop, reject=reject) icas_from_other_data.append(this_ica) """ Explanation: The idea behind corrmap is that artefact patterns are similar across subjects and can thus be identified by correlating the different patterns resulting from each solution with a template. The procedure is therefore semi-automatic. :func:mne.preprocessing.corrmap hence takes a list of ICA solutions and a template, that can be an index or an array. As we don't have different subjects or runs available today, here we will simulate ICA solutions from different subjects by fitting ICA models to different parts of the same recording. Then we will use one of the components from our original ICA as a template in order to detect sufficiently similar components in the simulated ICAs. The following block of code simulates having ICA solutions from different runs/subjects so it should not be used in real analysis - use independent data sets instead. End of explanation """ print(icas_from_other_data) """ Explanation: Remember, don't do this at home! Start by reading in a collection of ICA solutions instead. Something like: icas = [mne.preprocessing.read_ica(fname) for fname in ica_fnames] End of explanation """ reference_ica = ica """ Explanation: We use our original ICA as reference. End of explanation """ reference_ica.plot_components() """ Explanation: Investigate our reference ICA: End of explanation """ reference_ica.plot_sources(eog_average, exclude=eog_inds) """ Explanation: Which one is the bad EOG component? Here we rely on our previous detection algorithm. You would need to decide yourself if no automatic detection was available. End of explanation """ icas = [reference_ica] + icas_from_other_data template = (0, eog_inds[0]) """ Explanation: Indeed it looks like an EOG, also in the average time course. We construct a list where our reference run is the first element. Then we can detect similar components from the other runs (the other ICA objects) using :func:mne.preprocessing.corrmap. So our template must be a tuple like (reference_run_index, component_index): End of explanation """ fig_template, fig_detected = corrmap(icas, template=template, label="blinks", show=True, threshold=.8, ch_type='mag') """ Explanation: Now we can run the CORRMAP algorithm. End of explanation """ eog_component = reference_ica.get_components()[:, eog_inds[0]] # If you calculate a new ICA solution, you can provide this array instead of # specifying the template in reference to the list of ICA objects you want # to run CORRMAP on. (Of course, the retrieved component map arrays can # also be used for other purposes than artifact correction.) # # You can also use SSP to correct for artifacts. It is a bit simpler and # faster but also less precise than ICA and requires that you know the event # timing of your artifact. # See :ref:`tut_artifacts_correct_ssp`. """ Explanation: Nice, we have found similar ICs from the other (simulated) runs! 
In this way, you can detect a type of artifact semi-automatically for example for all subjects in a study. The detected template can also be retrieved as an array and stored; this array can be used as an alternative template to :func:mne.preprocessing.corrmap. End of explanation """
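# --- Appended illustrative sketch (not part of the original tutorial) ---
# Once the bad components are listed in ``ica.exclude`` (the EOG component
# above, plus the ECG ones if you did that exercise), the actual cleaning step
# is a single call.  We work on a copy so the original raw object is left
# untouched; this assumes the standard ``ICA.apply`` method of MNE.
raw_clean = ica.apply(raw.copy())   # reconstructs the data without the excluded components
print('Excluded components:', ica.exclude)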
idekerlab/deep-cell
data-builder/tree_generator-clixo-final.ipynb
mit
# Load data sets import pandas as pd treeSourceUrl = './data/preds_yeastnet_no_gi_0.04_0.5.txt.propagate.small_parent_tree' geneCountFile = './data/preds_yeastnet_no_gi_0.04_0.5.txt.propagate.term_sizes' alignmentFile = './data/alignments_FDR_0.1_t_0.1' geneAssignment = './data/preds_yeastnet_no_gi_0.04_0.5.txt.propagate.mapping' # Load the tree data treeColNames = ['parent', 'child', 'type', 'in_tree'] tree = pd.read_csv(treeSourceUrl, delimiter='\t', names=treeColNames) tree.tail() assignment = pd.read_csv(geneAssignment, sep='\t', names=['gene', 'clixo']) print(assignment['clixo'].unique().shape) assignment.head() al = pd.read_csv(alignmentFile, sep='\t', names=['clixo', 'go', 'similarity', 'fdr', 'genes']) al.head() mapping = {} for row in al.itertuples(): entry = { 'go': row[2], 'score': row[3], 'dfr': row[4] } mapping[str(row[1])] = entry geneCounts = pd.read_csv(geneCountFile, names=['clixo', 'count'], sep='\t') term2count = {} for row in geneCounts.itertuples(): term2count[str(row[1])] = row[2].item() # Get unique terms clixo_terms = set() for row in tree.itertuples(): etype = row[3] if not etype.startswith('gene'): clixo_terms.add(str(row[1])) clixo_terms.add(str(row[2])) print(len(clixo_terms)) """ Explanation: CLIXO Ontology Tree Generator This is a notebook to generate tree data file from original table and annotations. This is the final version of the script creating an Cytoscape.js file with gene count. Requirment DAG file for CLIXO Term to gene assignment file GO alignment file CLIXO TERM COUNT = 4805 End of explanation """ import json clixoTree = { 'data': { 'name': 'CLIXO Tree' }, 'elements': { 'nodes': [], 'edges': [] } } print(json.dumps(clixoTree, indent=4)) def get_node(id, count): node = { 'data': { 'id': id, 'geneCount': count } } return node def get_edge(source, target): edge = { 'data': { 'source': target, 'target': source } } return edge edges = [] PREFIX = 'CLIXO:' for row in tree.itertuples(): etype = row[3] in_tree = row[4] if etype.startswith('gene') or in_tree == 'NOT_TREE': continue source = PREFIX + str(row[1]) child = PREFIX + str(row[2]) edges.append(get_edge(source, child)) print(len(edges)) nodes = [] for id in clixo_terms: node = get_node(PREFIX + id, term2count[id]) nodes.append(node) print(len(nodes)) clixoTree['elements']['nodes'] = nodes clixoTree['elements']['edges'] = edges with open('./data/clixo-tree.cyjs', 'w') as outfile: json.dump(clixoTree, outfile) """ Explanation: Build Base CyJS Network End of explanation """ import networkx as nx DG=nx.DiGraph() for node in nodes: DG.add_node(node['data']['id']) for edge in edges: DG.add_edge(edge['data']['source'], edge['data']['target']) import matplotlib.pyplot as plt nx.draw_circular(DG) # pos = nx.nx_pydot.pydot_layout(DG) """ Explanation: Layout with networkx End of explanation """
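# --- Appended illustrative check (not part of the original pipeline) ---
# A couple of quick sanity checks on the generated graph before handing the
# CyJS file to a viewer: the node and edge counts should match the lists
# written above, and a term hierarchy is expected to be acyclic.  For a rooted
# hierarchy, one of the two degree-zero counts below should be 1 (which one is
# the root depends on the source/target orientation chosen in get_edge above).
print('nodes:', DG.number_of_nodes(), 'edges:', DG.number_of_edges())
print('is DAG:', nx.is_directed_acyclic_graph(DG))
no_incoming = [n for n in DG.nodes() if DG.in_degree(n) == 0]
no_outgoing = [n for n in DG.nodes() if DG.out_degree(n) == 0]
print('terms with no incoming edge:', len(no_incoming))
print('terms with no outgoing edge:', len(no_outgoing))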
ML4DS/ML4all
C1.Intro_Classification/Intro_Classification_student.ipynb
mit
# To visualize plots in the notebook %matplotlib inline # Import some libraries that will be necessary for working with data and displaying plots import csv # To read csv files import random import matplotlib.pyplot as plt import numpy as np from scipy import spatial from sklearn import neighbors, datasets """ Explanation: Introduction to Classification. Notebook version: 2.3 (Oct 25, 2020) Author: Jesús Cid Sueiro (jcid@tsc.uc3m.es) Jerónimo Arenas García (jarenas@tsc.uc3m.es) Changes: v.1.0 - First version. Extracted from a former notebook on K-NN v.2.0 - Adapted to Python 3.0 (backcompatible with Python 2.7) v.2.1 - Minor corrections affecting the notation and assumptions v.2.2 - Updated index notation v.2.3 - Adaptation to slides conversion End of explanation """ # Taken from Jason Brownlee notebook. with open('datasets/iris.data', 'r') as csvfile: lines = csv.reader(csvfile) for row in lines: print(','.join(row)) """ Explanation: 1. The Classification problem In a generic classification problem, we are given an observation vector ${\bf x}\in \mathbb{R}^N$ which is known to belong to one and only one category or class, $y$, from the set ${\mathcal Y} = {0, 1, \ldots, M-1}$. The goal of a classifier system is to predict $y$ based on ${\bf x}$. To design the classifier, we are given a collection of labelled observations ${\mathcal D} = {({\bf x}k, y_k)}{k=0}^{K-1}$ where, for each observation ${\bf x}_k$, the value of its true category, $y_k$, is known. 1.1. Binary Classification We will focus in binary classification problems, where the label set is binary, ${\mathcal Y} = {0, 1}$. Despite its simplicity, this is the most frequent case. Many multi-class classification problems are usually solved by decomposing them into a collection of binary problems. 1.2. The independence and identical distribution (i.i.d.) assumption. The classification algorithms, as many other machine learning algorithms, are based on two major underlying hypothesis: Independence: All samples are statistically independent. Identical distribution: All samples in dataset ${\mathcal D}$ have been generated by the same distribution $p_{{\bf X}, Y}({\bf x}, y)$. The i.i.d. assumption is essential to guarantee that a classifier based on ${\mathcal D}$ has a good perfomance when applied to new input samples. The underlying distribution is unknown (if we knew it, we could apply classic decision theory to make optimal predictions). This is why we need the data in ${\mathcal D}$ to design the classifier. 2. A simple classification problem: the Iris dataset (Iris dataset presentation is based on this <a href=http://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/> Tutorial </a> by <a href=http://machinelearningmastery.com/about/> Jason Brownlee</a>) As an illustration, consider the <a href = http://archive.ics.uci.edu/ml/datasets/Iris> Iris dataset </a>, taken from the <a href=http://archive.ics.uci.edu/ml/> UCI Machine Learning repository </a>. Quoted from the dataset description: This is perhaps the best known database to be found in the pattern recognition literature. The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant. [...] One class is linearly separable from the other 2; the latter are NOT linearly separable from each other. The class is the species, which is one of setosa, versicolor or virginica. 
Each instance contains 4 measurements of given flowers: sepal length, sepal width, petal length and petal width, all in centimeters. End of explanation """ # Adapted from a notebook by Jason Brownlee def loadDataset(filename, split): xTrain = [] cTrain = [] xTest = [] cTest = [] with open(filename, 'r') as csvfile: lines = csv.reader(csvfile) dataset = list(lines) for i in range(len(dataset)-1): for y in range(4): dataset[i][y] = float(dataset[i][y]) item = dataset[i] if random.random() < split: xTrain.append(item[0:-1]) cTrain.append(item[-1]) else: xTest.append(item[0:-1]) cTest.append(item[-1]) return xTrain, cTrain, xTest, cTest """ Explanation: 2.1. Training and test Next, we will split the data into two sets: Training set, that will be used to learn the classification model Test set, that will be used to evaluate the classification performance The data partition must be random, in such a way that the statistical distribution of both datasets is the same. The code fragment below defines a function loadDataset that loads the data in a CSV with the provided filename, converts the flower measures (that were loaded as strings) into numbers and, finally, it splits the data into a training and test sets. End of explanation """ xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('./datasets/iris.data', 0.67) nTrain_all = len(xTrain_all) nTest_all = len(xTest_all) print('Train:', str(nTrain_all)) print('Test:', str(nTest_all)) """ Explanation: We can use this function to get a data split. An expected ratio of 67/33 samples for train/test will be used. However, note that, because of the way samples are assigned to the train or test datasets, the exact number of samples in each partition will differ if you run the code several times. End of explanation """ i = 2 # Try 0,1,2,3 j = 3 # Try 0,1,2,3 with j!=i # Take coordinates for each class separately xiSe = [xTrain_all[n][i] for n in range(nTrain_all) if cTrain_all[n]=='Iris-setosa'] xjSe = [xTrain_all[n][j] for n in range(nTrain_all) if cTrain_all[n]=='Iris-setosa'] xiVe = [xTrain_all[n][i] for n in range(nTrain_all) if cTrain_all[n]=='Iris-versicolor'] xjVe = [xTrain_all[n][j] for n in range(nTrain_all) if cTrain_all[n]=='Iris-versicolor'] xiVi = [xTrain_all[n][i] for n in range(nTrain_all) if cTrain_all[n]=='Iris-virginica'] xjVi = [xTrain_all[n][j] for n in range(nTrain_all) if cTrain_all[n]=='Iris-virginica'] plt.plot(xiSe, xjSe,'bx', label='Setosa') plt.plot(xiVe, xjVe,'r.', label='Versicolor') plt.plot(xiVi, xjVi,'g+', label='Virginica') plt.xlabel('$x_' + str(i) + '$') plt.ylabel('$x_' + str(j) + '$') plt.legend(loc='best') plt.show() """ Explanation: 2.2. Scatter plots To get some intuition about this four dimensional dataset we can plot 2-dimensional projections taking only two variables each time. 
End of explanation """ # Select two classes c0 = 'Iris-versicolor' c1 = 'Iris-virginica' # Select two coordinates ind = [0, 1] # Take training test X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all) if cTrain_all[n]==c0 or cTrain_all[n]==c1]) C_tr = [c for c in cTrain_all if c==c0 or c==c1] Y_tr = np.array([int(c==c1) for c in C_tr]) n_tr = len(X_tr) # Take test set X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all) if cTest_all[n]==c0 or cTest_all[n]==c1]) C_tst = [c for c in cTest_all if c==c0 or c==c1] Y_tst = np.array([int(c==c1) for c in C_tst]) n_tst = len(X_tst) # Separate components of x into different arrays (just for the plots) x0c0 = [X_tr[n][0] for n in range(n_tr) if Y_tr[n]==0] x1c0 = [X_tr[n][1] for n in range(n_tr) if Y_tr[n]==0] x0c1 = [X_tr[n][0] for n in range(n_tr) if Y_tr[n]==1] x1c1 = [X_tr[n][1] for n in range(n_tr) if Y_tr[n]==1] # Scatterplot. labels = {'Iris-setosa': 'Setosa', 'Iris-versicolor': 'Versicolor', 'Iris-virginica': 'Virginica'} plt.plot(x0c0, x1c0,'r.', label=labels[c0]) plt.plot(x0c1, x1c1,'g+', label=labels[c1]) plt.xlabel('$x_' + str(ind[0]) + '$') plt.ylabel('$x_' + str(ind[1]) + '$') plt.legend(loc='best') plt.show() """ Explanation: In the following, we will design a classifier to separate classes "Versicolor" and "Virginica" using $x_0$ and $x_1$ only. To do so, we build a training set with samples from these categories, and a binary label $y^{(k)} = 1$ for samples in class "Virginica", and $0$ for "Versicolor" data. End of explanation """ print(f'Class 0 {c0}: {n_tr - sum(Y_tr)} samples') print(f'Class 1 ({c1}): {sum(Y_tr)} samples') """ Explanation: 3. A Baseline Classifier: Maximum A Priori. For the selected data set, we have two clases and a dataset with the following class proportions: End of explanation """ y = int(2*sum(Y_tr) > n_tr) print(f'y = {y} ({c1 if y==1 else c0})') """ Explanation: The maximum a priori classifier assigns any sample ${\bf x}$ to the most frequent class in the training set. Therefore, the class prediction $y$ for any sample ${\bf x}$ is End of explanation """ # Training and test error arrays E_tr = (Y_tr != y) E_tst = (Y_tst != y) # Error rates pe_tr = float(sum(E_tr)) / n_tr pe_tst = float(sum(E_tst)) / n_tst print('Pe(train):', str(pe_tr)) print('Pe(test):', str(pe_tst)) """ Explanation: The error rate for this baseline classifier is: End of explanation """
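A small sketch along the lines of the last remark (not part of the original notebook): loop over all pairs of the four features and draw the corresponding 2-dimensional scatter plots.

# Plot every 2-D projection (i, j) of the four Iris features.
fig, axes = plt.subplots(4, 4, figsize=(10, 10))
for i in range(4):
    for j in range(4):
        ax = axes[i, j]
        if i == j:
            ax.axis('off')
            continue
        for cname, marker in [('Iris-setosa', 'bx'),
                              ('Iris-versicolor', 'r.'),
                              ('Iris-virginica', 'g+')]:
            xi = [xTrain_all[n][i] for n in range(nTrain_all) if cTrain_all[n] == cname]
            xj = [xTrain_all[n][j] for n in range(nTrain_all) if cTrain_all[n] == cname]
            ax.plot(xi, xj, marker, markersize=3)
        ax.set_xlabel('$x_' + str(i) + '$')
        ax.set_ylabel('$x_' + str(j) + '$')
plt.tight_layout()
plt.show()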
dcavar/python-tutorial-for-ipython
notebooks/Python Word Sense Disambiguation.ipynb
apache-2.0
from nltk.corpus import wordnet """ Explanation: Python Word Sense Disambiguation (C) 2017-2019 by Damir Cavar Version: 1.2, November 2019 License: Creative Commons Attribution-ShareAlike 4.0 International License (CA BY-SA 4.0) This is a tutorial related to the discussion of a WordSense disambiguation and various machine learning strategies discussed in the textbook Machine Learning: The Art and Science of Algorithms that Make Sense of Data by Peter Flach. This tutorial was developed as part of my course material for the courses Machine Learning and Advanced Natural Language Processing in the at Indiana University. Word Sense Disambiguation For a simple Bayesian implementation of a Word Sense Disambiguation algorithm we will use the WordNet NLTK module. We import it in the following way: End of explanation """ mySynsets = wordnet.synsets('bank') print(mySynsets) """ Explanation: For a word that we want to disambiguate, we need to get all its synsets: End of explanation """ for s in mySynsets: print(s.name()) text = " ".join( [s.definition()] + s.examples() ) print(text, "\n", "-" * 20) """ Explanation: For each synset we need to get its definition and the examples to use them as bags of words for a comparison: End of explanation """ import itertools lOfl = [["this"], ["is","a"], ["test"]] print(list(itertools.chain.from_iterable(lOfl))) """ Explanation: We will need to join a list of lists into one list, that is, we need to flatten a list of lists. To achive this, we can use the following code: End of explanation """ from nltk import word_tokenize, pos_tag """ Explanation: What we should do is to tokenize and part-of-speech tag the text, that is the descriptions and the examples. We can use NLTK's word_tokenize and pos_tag modules: End of explanation """ from nltk.corpus import stopwords stopw = stopwords.words("english") for s in mySynsets: print(s.name()) text = pos_tag(word_tokenize(s.definition())) text += list(itertools.chain.from_iterable([ pos_tag(word_tokenize(x)) for x in s.examples() ])) text2 = [ x for x in text if x[0] not in stopw ] print(text2, "\n", "-" * 20) from nltk.stem import WordNetLemmatizer wordnet_lemmatizer = WordNetLemmatizer() wordnet_lemmatizer.lemmatize('dogs') """ Explanation: Now we can tokenize and PoS-tag the texts: End of explanation """ example = "John saw the dogs barking at the cats." keyword = "dog" tokens = word_tokenize(example) lemmas = [ wordnet_lemmatizer.lemmatize(x) for x in tokens ] pos = -1 try: pos = lemmas.index(keyword) except ValueError: pass print("Position:", pos) print(lemmas) posTokens = pos_tag(tokens) print("Lemma:", lemmas[pos]) print(" PoS:", posTokens[pos]) print(" Tag:", posTokens[pos][1]) print(" MTag:", posTokens[pos][1][0]) category = posTokens[pos][1][0] print(category) wType = None if category == 'N': wType = wordnet.NOUN elif category == 'V': wType = wordnet.VERB elif category == 'J': wType = wordnet.ADJ elif category == 'R': wType = wordnet.ADV print("Type:", wType) wordnet.synsets(keyword, pos=wType) for s in wordnet.synsets(keyword, pos=wType): print(s.name()) text = pos_tag(word_tokenize(s.definition())) text += list(itertools.chain.from_iterable([ pos_tag(word_tokenize(x)) for x in s.examples() ])) print(text, "\n", "-" * 20) """ Explanation: The first step that we would take with a text that contains the word that we want to disambiguate is to find its position in the token list. End of explanation """
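The excerpt stops here; as a hedged sketch of how the disambiguation step could be completed, the context words can be compared with each candidate synset's gloss and examples using a simple bag-of-words overlap, in the spirit of the simplified Lesk algorithm. All variables used below (lemmas, keyword, wType, stopw) come from the cells above.

# Score each candidate synset by its word overlap with the sentence context.
context = set(w.lower() for w in lemmas if w.lower() not in stopw)

best_sense, best_score = None, -1
for s in wordnet.synsets(keyword, pos=wType):
    gloss_tokens = word_tokenize(s.definition())
    for ex in s.examples():
        gloss_tokens += word_tokenize(ex)
    gloss = set(w.lower() for w in gloss_tokens if w.lower() not in stopw)
    overlap = len(context & gloss)
    print(s.name(), overlap)
    if overlap > best_score:
        best_sense, best_score = s, overlap

print("Selected sense:", best_sense.name() if best_sense is not None else None)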
akront1104/World_Bank_Data
sliderule_dsi_json_exercise.ipynb
mit
import pandas as pd """ Explanation: JSON examples and exercise get familiar with packages for dealing with JSON study examples with JSON strings and files work on exercise to be completed and submitted reference: http://pandas.pydata.org/pandas-docs/stable/io.html#io-json-reader data source: http://jsonstudio.com/resources/ End of explanation """ import json from pandas.io.json import json_normalize """ Explanation: imports for Python, Pandas End of explanation """ # define json string data = [{'state': 'Florida', 'shortname': 'FL', 'info': {'governor': 'Rick Scott'}, 'counties': [{'name': 'Dade', 'population': 12345}, {'name': 'Broward', 'population': 40000}, {'name': 'Palm Beach', 'population': 60000}]}, {'state': 'Ohio', 'shortname': 'OH', 'info': {'governor': 'John Kasich'}, 'counties': [{'name': 'Summit', 'population': 1234}, {'name': 'Cuyahoga', 'population': 1337}]}] # use normalization to create tables from nested element json_normalize(data, 'counties') # further populate tables created from nested element json_normalize(data, 'counties', ['state', 'shortname', ['info', 'governor']]) """ Explanation: JSON example, with string demonstrates creation of normalized dataframes (tables) from nested json string source: http://pandas.pydata.org/pandas-docs/stable/io.html#normalization End of explanation """ # load json as string json.load((open('data/world_bank_projects_less.json'))) # load as Pandas dataframe sample_json_df = pd.read_json('data/world_bank_projects_less.json') sample_json_df """ Explanation: JSON example, with file demonstrates reading in a json file as a string and as a table uses small sample file containing data about projects funded by the World Bank data source: http://jsonstudio.com/resources/ End of explanation """ #import packages import pandas as pd import json from pandas.io.json import json_normalize # Displays all DataFrame columns and rows pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) # Load json file as a string world_bank_file = json.load((open('world_bank_projects.json'))) # Load json file into a Pandas DataFrame world_bank_df = pd.read_json('world_bank_projects.json') #print(world_bank_df.head(5)) #print(world_bank_df.columns) print(world_bank_df.info()) #print(world_bank_df[['mjthemecode','mjtheme']]) # 1. Find the 10 countries with the most projects # Select the countryname and project_name columns from the Pandas DataFrame most_projects = world_bank_df[['countryname', 'project_name']] # Group by countryname, count and sort values from highest to lowest most_projects = most_projects.groupby("countryname").size().sort_values(ascending = False) # Print first 10 rows print(most_projects.head(10)) # 2. Find the top 10 major project themes (using column ‘mjtheme_namecode’) # Use normalization to create tables from nested element major_themes = json_normalize(world_bank_file, 'mjtheme_namecode') # Group by countryname, count and sort values from highest to lowest major_themes = major_themes.groupby("name").size().sort_values(ascending = False) # Print first 10 rows print(major_themes) # 3. In 2. above you will notice that some entries have only the code and the name is missing. # Create a dataframe with the missing names filled in. 
import numpy as np # Use normalization to create table from 'mjtheme_namecode' column missing_names = json_normalize(world_bank_file, 'mjtheme_namecode') # Replacing empty entries with NaNs missing_names = missing_names.apply(lambda x: x.str.strip()).replace('', np.nan) #print(missing_names) # Finding unique values code_name_map = missing_names.loc[missing_names['name'] != '', :] unique_names = code_name_map.drop_duplicates().dropna() # Creating dictionary from unique_names unique_names_dict_list = unique_names.set_index('code').T.to_dict(orient = 'records') # Extracting dictionary from list unique_names_dict = unique_names_dict_list[0] # Fills NaNs with values from dictionary filled_values = missing_names['name'].fillna(missing_names['code'].map(unique_names_dict)) print(filled_values.value_counts()) """ Explanation: JSON exercise Using data in file 'data/world_bank_projects.json' and the techniques demonstrated above, 1. Find the 10 countries with most projects 2. Find the top 10 major project themes (using column 'mjtheme_namecode') 3. In 2. above you will notice that some entries have only the code and the name is missing. Create a dataframe with the missing names filled in. End of explanation """
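For comparison, here is a more compact way to approach question 3 with a pandas lookup; this is a sketch of an alternative, not part of the original answers.

# Build a code -> name lookup from the non-empty rows and map it back.
themes = json_normalize(world_bank_file, 'mjtheme_namecode')
code_to_name = (themes[themes['name'] != '']
                .drop_duplicates(subset='code')
                .set_index('code')['name'])
themes['name'] = themes['code'].map(code_to_name)
print(themes['name'].value_counts())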
kubeflow/pipelines
samples/tutorials/gpu/gpu.ipynb
apache-2.0
import kfp from kfp import dsl def gpu_smoking_check_op(): return dsl.ContainerOp( name='check', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi'] ).set_gpu_limit(1) @dsl.pipeline( name='GPU smoke check', description='smoke check as to whether GPU env is ready.' ) def gpu_pipeline(): gpu_smoking_check = gpu_smoking_check_op() if __name__ == '__main__': kfp.compiler.Compiler().compile(gpu_pipeline, 'gpu_smoking_check.yaml') """ Explanation: This notebook walks you through how to use accelerators for Kubeflow Pipelines steps. Preparation If you installed Kubeflow via kfctl, these steps will have already been done, and you can skip this section. If you installed Kubeflow Pipelines via Google Cloud AI Platform Pipelines UI or Standalone manifest, you willl need to follow these steps to set up your GPU enviroment. Add GPU nodes to your cluster To see which accelerators are available in each zone, run the following command or check the document gcloud compute accelerator-types list You may also check or edit the GCP's GPU Quota to make sure you still have GPU quota in the region. To reduce costs, you may want to create a zero-sized node pool for GPU and enable autoscaling. Here is an example to create a P100 GPU node pool for a cluster. ```shell You may customize these parameters. export GPU_POOL_NAME=p100pool export CLUSTER_NAME=existingClusterName export CLUSTER_ZONE=us-west1-a export GPU_TYPE=nvidia-tesla-p100 export GPU_COUNT=1 export MACHINE_TYPE=n1-highmem-16 Node pool creation may take several minutes. gcloud container node-pools create ${GPU_POOL_NAME} \ --accelerator type=${GPU_TYPE},count=${GPU_COUNT} \ --zone ${CLUSTER_ZONE} --cluster ${CLUSTER_NAME} \ --num-nodes=0 --machine-type=${MACHINE_TYPE} --min-nodes=0 --max-nodes=5 --enable-autoscaling \ --scopes=cloud-platform ``` Here in this sample, we specified --scopes=cloud-platform. More info is here. This scope will allow node pool jobs to use the GCE Default Service Account to access GCP APIs (like GCS, etc.). You can also use Workload Identity or Application Default Credentials to replace --scopes=cloud-platform. Install NVIDIA device driver to the cluster After adding GPU nodes to your cluster, you need to install NVIDIA’s device drivers to the nodes. Google provides a GKE DaemonSet that automatically installs the drivers for you. To deploy the installation DaemonSet, run the following command. You can run this command any time (even before you create your node pool), and you only need to do this once per cluster. shell kubectl apply -f https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml Consume GPU via Kubeflow Pipelines SDK Once your cluster is set up to support GPUs, the next step is to indicate which steps in your pipelines should use accelerators, and what type they should use. Here is a document that describes the options. The following is an example 'smoke test' pipeline, to see if your cluster setup is working properly. 
End of explanation """ import kfp from kfp import dsl def gpu_p100_op(): return dsl.ContainerOp( name='check_p100', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi'] ).add_node_selector_constraint('cloud.google.com/gke-accelerator', 'nvidia-tesla-p100').container.set_gpu_limit(1) def gpu_v100_op(): return dsl.ContainerOp( name='check_v100', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi'] ).add_node_selector_constraint('cloud.google.com/gke-accelerator', 'nvidia-tesla-v100').container.set_gpu_limit(1) @dsl.pipeline( name='GPU smoke check', description='Smoke check as to whether GPU env is ready.' ) def gpu_pipeline(): gpu_p100 = gpu_p100_op() gpu_v100 = gpu_v100_op() if __name__ == '__main__': kfp.compiler.Compiler().compile(gpu_pipeline, 'gpu_smoking_check.yaml') """ Explanation: You may see a warning message from Kubeflow Pipeline logs saying "Insufficient nvidia.com/gpu". If so, this probably means that your GPU-enabled node is still spinning up; please wait for few minutes. You can check the current nodes in your cluster like this: kubectl get nodes -o wide If everything runs as expected, the nvidia-smi command should list the CUDA version, GPU type, usage, etc. (See the logs panel in the pipeline UI to view output). You may also notice that after the pipeline step's GKE pod has finished, the new GPU cluster node is still there. GKE autoscale algorithm will free that node if no usage for certain time. More info is here. Multiple GPUs pool in one cluster It's possible you want more than one type of GPU to be supported in one cluster. There are several types of GPUs. Certain regions often support a only subset of the GPUs (document). Since we can set --num-nodes=0 for certain GPU node pool to save costs if no workload, we can create multiple node pools for different types of GPUs. Add additional GPU nodes to your cluster In a previous section, we added a node pool for P100s. Here we add another pool for V100s. ```shell You may customize these parameters. export GPU_POOL_NAME=v100pool export CLUSTER_NAME=existingClusterName export CLUSTER_ZONE=us-west1-a export GPU_TYPE=nvidia-tesla-v100 export GPU_COUNT=1 export MACHINE_TYPE=n1-highmem-8 Node pool creation may take several minutes. gcloud container node-pools create ${GPU_POOL_NAME} \ --accelerator type=${GPU_TYPE},count=${GPU_COUNT} \ --zone ${CLUSTER_ZONE} --cluster ${CLUSTER_NAME} \ --num-nodes=0 --machine-type=${MACHINE_TYPE} --min-nodes=0 --max-nodes=5 --enable-autoscaling ``` Consume certain GPU via Kubeflow Pipelines SDK If your cluster has multiple GPU node pools, you can explicitly specify that a given pipeline step should use a particular type of accelerator. This example shows how to use P100s for one pipeline step, and V100s for another. 
End of explanation """ import kfp import kfp.gcp as gcp from kfp import dsl def gpu_p100_op(): return dsl.ContainerOp( name='check_p100', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi'] ).add_node_selector_constraint('cloud.google.com/gke-accelerator', 'nvidia-tesla-p100').container.set_gpu_limit(1) def gpu_v100_op(): return dsl.ContainerOp( name='check_v100', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi'] ).add_node_selector_constraint('cloud.google.com/gke-accelerator', 'nvidia-tesla-v100').container.set_gpu_limit(1) def gpu_v100_preemptible_op(): v100_op = dsl.ContainerOp( name='check_v100_preemptible', image='tensorflow/tensorflow:latest-gpu', command=['sh', '-c'], arguments=['nvidia-smi']) v100_op.container.set_gpu_limit(1) v100_op.add_node_selector_constraint('cloud.google.com/gke-accelerator', 'nvidia-tesla-v100') v100_op.apply(gcp.use_preemptible_nodepool(hard_constraint=True)) return v100_op @dsl.pipeline( name='GPU smoking check', description='Smoking check whether GPU env is ready.' ) def gpu_pipeline(): gpu_p100 = gpu_p100_op() gpu_v100 = gpu_v100_op() gpu_v100_preemptible = gpu_v100_preemptible_op() if __name__ == '__main__': kfp.compiler.Compiler().compile(gpu_pipeline, 'gpu_smoking_check.yaml') """ Explanation: You should see different "nvidia-smi" logs from the two pipeline steps. Using Preemptible GPUs A Preemptible GPU resource is cheaper, but use of these instances means that a pipeline step has the potential to be aborted and then retried. This means that pipeline steps used with preemptible instances must be idempotent (the step gives the same results if run again), or creates some kind of checkpoint so that it can pick up where it left off. To use preemptible GPUs, create a node pool as follows. Then when specifying a pipeline, you can indicate use of a preemptible node pool for a step. The only difference in the following node-pool creation example is that the --preemptible and --node-taints=preemptible=true:NoSchedule parameters have been added. ``` export GPU_POOL_NAME=v100pool-preemptible export CLUSTER_NAME=existingClusterName export CLUSTER_ZONE=us-west1-a export GPU_TYPE=nvidia-tesla-v100 export GPU_COUNT=1 export MACHINE_TYPE=n1-highmem-8 gcloud container node-pools create ${GPU_POOL_NAME} \ --accelerator type=${GPU_TYPE},count=${GPU_COUNT} \ --zone ${CLUSTER_ZONE} --cluster ${CLUSTER_NAME} \ --preemptible \ --node-taints=preemptible=true:NoSchedule \ --num-nodes=0 --machine-type=${MACHINE_TYPE} --min-nodes=0 --max-nodes=5 --enable-autoscaling ``` Then, you can define a pipeline as follows (note the use of use_preemptible_nodepool()). End of explanation """
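Optionally (this is a sketch, not part of the original tutorial), the pipeline can also be submitted directly from a notebook with the KFP client instead of only compiling it to YAML; the host URL below is a placeholder for your own Kubeflow Pipelines endpoint.

# Submit the pipeline for execution; replace the host with your KFP endpoint.
client = kfp.Client(host='http://localhost:8080/pipeline')  # placeholder endpoint
client.create_run_from_pipeline_func(gpu_pipeline, arguments={})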
kimkipyo/dss_git_kkp
Python 복습/11일차.목_Pandas/11일차_3T_ajax 로 이루어진 select, option 정보 가져오기 ( feat, 건강보험심사평가원 ).ipynb
mit
import requests
from bs4 import BeautifulSoup
# to parse what we fetched with requests

response = requests.get("http://www.hira.or.kr/re/diag/getDiagAmtList.do")
dom = BeautifulSoup(response.text, "html.parser")
dom.select_one("#sidoCd")
select_element = dom.select_one("#sidoCd")
print(select_element)
"""
Explanation: 3T_Fetching select/option data built with ajax ( feat. 건강보험심사평가원, the Health Insurance Review and Assessment Service )
Practice session: crawl the detailed si/gun/gu and eup/myeon/dong district information => and turn it into a single DataFrame.
The first thing to try is simply sending a request.
End of explanation
"""
response = requests.post("http://www.hira.or.kr/rd/hosp/selectSidoListAjax.do")
response.text
"""
Explanation: So the address data is delivered dynamically ( via ajax / javascript ).
What next? Check the network tab; the page might not be dynamic — it could just be static.
End of explanation
"""
import json
json.loads(response.text)  # str => dict
response.json()  # same as the line above, but easier to use
data = response.json()
"""
Explanation: json => dict
End of explanation
"""
data.get("data")[0]
df = pd.DataFrame(data.get("data"))
df
data = {
    "sidoCd": 11
}
response = requests.post("http://www.hira.or.kr/rd/hosp/selectSgguListAjax.do", data=data)
data = response.json()
city_df = pd.DataFrame(data.get("data"))
city_df
data = {
    "sidoCdNm": "서울",
    "sgguCdNm": "강남구",  # we do not need to url-encode this ourselves
}
response = requests.post("http://www.hira.or.kr/rd/hosp/selectDoroListAjax.do", data=data)
data = response.json()
doro_df = pd.DataFrame(data.get('data'))
doro_df
# street-name addresses => DF
# dong (neighbourhood) names => DF
pd.DataFrame(columns=["State", "City", "Town"])
# Seoul => Gangnam-gu.csv, Jung-gu.csv, ...
# Busan => ...
# any approach is fine
# (1) pull out the rows that match a given condition
# (2) loop over the df with a for loop ( enumerate )
response = requests.get("https://api.zigbang.com/v1/items?detail=true&item_ids=4620292&item_ids=4366382&item_ids=4566963&item_ids=4585208&item_ids=4560308&item_ids=4552724&item_ids=4344484&item_ids=4612042&item_ids=4574810&item_ids=4588687&item_ids=4387287&item_ids=4538842&item_ids=4557985&item_ids=4579464&item_ids=4607349&item_ids=4603203&item_ids=4341393&item_ids=4575315&item_ids=4350877&item_ids=4538375&item_ids=4616443&item_ids=4281504&item_ids=4556024&item_ids=4550034&item_ids=4512172&item_ids=4507118&item_ids=4606156&item_ids=4457169&item_ids=4526327&item_ids=4407071&item_ids=4582264&item_ids=4607937&item_ids=4395275&item_ids=4568603&item_ids=4569329&item_ids=4564865&item_ids=4551098&item_ids=4617261&item_ids=4536918&item_ids=4614718&item_ids=4614198&item_ids=4610604&item_ids=4578711&item_ids=4593621&item_ids=4612621&item_ids=4518874&item_ids=4533169&item_ids=4409063&item_ids=4617602&item_ids=4477945&item_ids=4249606&item_ids=4560223&item_ids=4570020&item_ids=4517907&item_ids=4530774&item_ids=4525210&item_ids=4596138&item_ids=4588994&item_ids=4612357&item_ids=4411862")
data = response.json()
df = pd.DataFrame(data.get('items'))
df
zigzig = [ item.get('item') for item in data.get('items') ]
df = pd.DataFrame(zigzig)
df
df.describe()
len(df)
df["deposit"]
# find listings with a deposit of at most 20,000,000 KRW and monthly rent of at most 500,000 KRW
df[df["deposit"] < 2000]
# conceptually, like multiplying the two boolean masks element-wise
matched_df = df[df["deposit"] < 2000][df["rent"] < 50]
matched_df.deposit
for index, column in enumerate(df):
    print(column)
for i in range(len(df)):  # this is the form you usually use
    pass
"""
Explanation: What if the data does not show up in the network tab? Then it was not fetched through a javascript network request but written directly into the page.
The information is already embedded in the page source, so you have the hassle of digging it out by hand.
Most sites, however, serve this kind of data dynamically through an API.
If a login is required, you can use selenium instead.
End of explanation
"""
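As a small follow-up sketch (not in the original notebook): the two price conditions used above are more robust when combined into a single boolean mask, which avoids chained indexing.

# Deposit under 20,000,000 KRW and monthly rent under 500,000 KRW in one mask.
mask = (df["deposit"] < 2000) & (df["rent"] < 50)
matched_df = df[mask]
print(len(matched_df))
matched_df[["deposit", "rent"]].head()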
sdpython/pyquickhelper
_unittests/ut_helpgen/data_gallery/notebooks/2a/notebook_convert.ipynb
mit
%%javascript
var kernel = IPython.notebook.kernel;
var body = document.body,  
    attribs = body.attributes;
var command = "theNotebook = " + "'"+attribs['data-notebook-name'].value+"'";
kernel.execute(command);
if "theNotebook" in locals():
    a=theNotebook
else:
    a="pas trouvé"
a
"""
Explanation: Convert a notebook into a document
First, we need to retrieve the notebook name (see How do I get the current IPython Notebook name):
End of explanation
"""
from pyquickhelper.helpgen.utils_pywin32 import import_pywin32
import_pywin32()
"""
Explanation: On Windows, you might need to execute the following trick (see Pywin32 does not find its DLL).
End of explanation
"""
from nbconvert import HTMLExporter
exportHtml = HTMLExporter()
if a != "pas trouvé":
    body,resources = exportHtml.from_filename(theNotebook)
    with open("conv_notebook.html","w",encoding="utf8") as f :
        f.write(body)
"""
Explanation: Then, we call the following code:
End of explanation
"""
from nbconvert import RSTExporter
exportRst = RSTExporter()
if a != "pas trouvé":
    body,resources = exportRst.from_filename(theNotebook)
    with open("conv_notebook.rst","w",encoding="utf8") as f :
        f.write(body)
"""
Explanation: We can do it with the RST format (see RSTExporter).
End of explanation
"""
from IPython.display import FileLink
FileLink("conv_notebook.rst")
"""
Explanation: If you need to add custom RST instructions, you could add HTML comments:
<!--RST
..index:: conversion,nbconvert
!RST-->
&lt;!--RST
..index:: conversion,nbconvert
!RST--&gt;
And write custom code to add it to your RST file.
Finally, if you want to retrieve or download a local file, such as the RST conversion above, for example:
<!--RST
..index:: FileLink
!RST-->
End of explanation
"""
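The explanation above mentions writing custom code to add the RST block to the generated file; a minimal sketch of one way to do that (an assumption about the workflow, not the author's own tooling) is to strip the HTML comment markers from the converted file so the RST directives inside them become active.

# Strip the <!--RST ... !RST--> markers from the generated RST file.
with open("conv_notebook.rst", "r", encoding="utf8") as f:
    rst_text = f.read()

rst_text = rst_text.replace("<!--RST", "").replace("!RST-->", "")

with open("conv_notebook.rst", "w", encoding="utf8") as f:
    f.write(rst_text)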
mne-tools/mne-tools.github.io
0.17/_downloads/af4923da095ff8767e419fa9e705bbba/plot_dipole_orientations.ipynb
bsd-3-clause
from mayavi import mlab import mne from mne.datasets import sample from mne.minimum_norm import make_inverse_operator, apply_inverse data_path = sample.data_path() evokeds = mne.read_evokeds(data_path + '/MEG/sample/sample_audvis-ave.fif') left_auditory = evokeds[0].apply_baseline() fwd = mne.read_forward_solution( data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif') mne.convert_forward_solution(fwd, surf_ori=True, copy=False) noise_cov = mne.read_cov(data_path + '/MEG/sample/sample_audvis-cov.fif') subjects_dir = data_path + '/subjects' """ Explanation: The role of dipole orientations in distributed source localization When performing source localization in a distributed manner (MNE/dSPM/sLORETA/eLORETA), the source space is defined as a grid of dipoles that spans a large portion of the cortex. These dipoles have both a position and an orientation. In this tutorial, we will look at the various options available to restrict the orientation of the dipoles and the impact on the resulting source estimate. Loading data Load everything we need to perform source localization on the sample dataset. End of explanation """ lh = fwd['src'][0] # Visualize the left hemisphere verts = lh['rr'] # The vertices of the source space tris = lh['tris'] # Groups of three vertices that form triangles dip_pos = lh['rr'][lh['vertno']] # The position of the dipoles white = (1.0, 1.0, 1.0) # RGB values for a white color gray = (0.5, 0.5, 0.5) # RGB values for a gray color red = (1.0, 0.0, 0.0) # RGB valued for a red color mlab.figure(size=(600, 400), bgcolor=white) # Plot the cortex mlab.triangular_mesh(verts[:, 0], verts[:, 1], verts[:, 2], tris, color=gray) # Mark the position of the dipoles with small red dots mlab.points3d(dip_pos[:, 0], dip_pos[:, 1], dip_pos[:, 2], color=red, scale_factor=1E-3) mlab.view(azimuth=180, distance=0.25) """ Explanation: The source space Let's start by examining the source space as constructed by the :func:mne.setup_source_space function. Dipoles are placed along fixed intervals on the cortex, determined by the spacing parameter. The source space does not define the orientation for these dipoles. End of explanation """ mlab.figure(size=(600, 400), bgcolor=white) # Plot the cortex mlab.triangular_mesh(verts[:, 0], verts[:, 1], verts[:, 2], tris, color=gray) # Show the dipoles as arrows pointing along the surface normal normals = lh['nn'][lh['vertno']] mlab.quiver3d(dip_pos[:, 0], dip_pos[:, 1], dip_pos[:, 2], normals[:, 0], normals[:, 1], normals[:, 2], color=red, scale_factor=1E-3) mlab.view(azimuth=180, distance=0.1) """ Explanation: Fixed dipole orientations While the source space defines the position of the dipoles, the inverse operator defines the possible orientations of them. One of the options is to assign a fixed orientation. Since the neural currents from which MEG and EEG signals originate flows mostly perpendicular to the cortex [1]_, restricting the orientation of the dipoles accordingly places a useful restriction on the source estimate. By specifying fixed=True when calling :func:mne.minimum_norm.make_inverse_operator, the dipole orientations are fixed to be orthogonal to the surface of the cortex, pointing outwards. Let's visualize this: End of explanation """ # Compute the source estimate for the 'left - auditory' condition in the sample # dataset. inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=True) stc = apply_inverse(left_auditory, inv, pick_ori=None) # Visualize it at the moment of peak activity. 
_, time_max = stc.get_peak(hemi='lh') brain = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) """ Explanation: Restricting the dipole orientations in this manner leads to the following source estimate for the sample data: End of explanation """ mlab.figure(size=(600, 400), bgcolor=white) # Define some more colors green = (0.0, 1.0, 0.0) blue = (0.0, 0.0, 1.0) # Plot the cortex mlab.triangular_mesh(verts[:, 0], verts[:, 1], verts[:, 2], tris, color=gray) # Make an inverse operator with loose dipole orientations inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=False, loose=1.0) # Show the three dipoles defined at each location in the source space dip_dir = inv['source_nn'].reshape(-1, 3, 3) dip_dir = dip_dir[:len(dip_pos)] # Only select left hemisphere for ori, color in zip((0, 1, 2), (red, green, blue)): mlab.quiver3d(dip_pos[:, 0], dip_pos[:, 1], dip_pos[:, 2], dip_dir[:, ori, 0], dip_dir[:, ori, 1], dip_dir[:, ori, 2], color=color, scale_factor=1E-3) mlab.view(azimuth=180, distance=0.1) """ Explanation: The direction of the estimated current is now restricted to two directions: inward and outward. In the plot, blue areas indicate current flowing inwards and red areas indicate current flowing outwards. Given the curvature of the cortex, groups of dipoles tend to point in the same direction: the direction of the electromagnetic field picked up by the sensors. Loose dipole orientations Forcing the source dipoles to be strictly orthogonal to the cortex makes the source estimate sensitive to the spacing of the dipoles along the cortex, since the curvature of the cortex changes within each ~10 square mm patch. Furthermore, misalignment of the MEG/EEG and MRI coordinate frames is more critical when the source dipole orientations are strictly constrained [2]_. To lift the restriction on the orientation of the dipoles, the inverse operator has the ability to place not one, but three dipoles at each location defined by the source space. These three dipoles are placed orthogonally to form a Cartesian coordinate system. Let's visualize this: End of explanation """ # Compute the source estimate, indicate that we want a vector solution stc = apply_inverse(left_auditory, inv, pick_ori='vector') # Visualize it at the moment of peak activity. _, time_max = stc.magnitude().get_peak(hemi='lh') brain = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) """ Explanation: When computing the source estimate, the activity at each of the three dipoles is collapsed into the XYZ components of a single vector, which leads to the following source estimate for the sample data: End of explanation """ # Set loose to 0.2, the default value inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=False, loose=0.2) stc = apply_inverse(left_auditory, inv, pick_ori='vector') # Visualize it at the moment of peak activity. _, time_max = stc.magnitude().get_peak(hemi='lh') brain = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) """ Explanation: Limiting orientations, but not fixing them Often, the best results will be obtained by allowing the dipoles to have somewhat free orientation, but not stray too far from a orientation that is perpendicular to the cortex. 
The loose parameter of the :func:mne.minimum_norm.make_inverse_operator allows you to specify a value between 0 (fixed) and 1 (unrestricted or "free") to indicate the amount the orientation is allowed to deviate from the surface normal. End of explanation """ # Only retain vector magnitudes stc = apply_inverse(left_auditory, inv, pick_ori=None) # Visualize it at the moment of peak activity. _, time_max = stc.get_peak(hemi='lh') brain = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) """ Explanation: Discarding dipole orientation information Often, further analysis of the data does not need information about the orientation of the dipoles, but rather their magnitudes. The pick_ori parameter of the :func:mne.minimum_norm.apply_inverse function allows you to specify whether to return the full vector solution ('vector') or rather the magnitude of the vectors (None, the default) or only the activity in the direction perpendicular to the cortex ('normal'). End of explanation """
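As a final hedged sketch (not in the original tutorial), the third pick_ori option mentioned above, 'normal', keeps only the signed activity along the direction perpendicular to the cortex; it can be tried with the same loose inverse operator defined earlier.

# Keep only the current component normal to the cortex (signed values).
stc_normal = apply_inverse(left_auditory, inv, pick_ori='normal')
_, time_max = stc_normal.get_peak(hemi='lh')
brain = stc_normal.plot(surface='white', subjects_dir=subjects_dir,
                        initial_time=time_max, time_unit='s', size=(600, 400))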
ctuning/ck-math
script/explore-clblast-matrix-size/clblast-distribution-tuner-sizes-analysis.ipynb
bsd-3-clause
import os import sys import json import re """ Explanation: [PUBLIC] Analysis of CLBlast tuning <a id="overview"></a> Overview This Jupyter Notebook analyses the performance that CLBlast achieves across a range of routines, sizes and configurations. Run first clblast-tuning-benchmarking.py <a id="data"></a> Get the experimental data from DropBox NB: Please ignore this section if you are not interested in re-running or modifying this notebook. The experimental data was collected on the experimental platform and archived as follows: $ cd `ck find ck-math:script:&lt;...&gt;` $ python &lt;...&gt;.py $ ck zip local:experiment:* --archive_name=&lt;...&gt;.zip It can be downloaded and extracted as follows: $ wget &lt;...&gt;.zip $ ck add repo:&lt;....&gt; --zip=&lt;....&gt;.zip --quiet <a id="code"></a> Data wrangling code NB: Please ignore this section if you are not interested in re-running or modifying this notebook. Includes Standard End of explanation """ import IPython as ip import pandas as pd import numpy as np import seaborn as sns import matplotlib as mp print ('IPython version: %s' % ip.__version__) print ('Pandas version: %s' % pd.__version__) print ('NumPy version: %s' % np.__version__) print ('Seaborn version: %s' % sns.__version__) # apt install python-tk print ('Matplotlib version: %s' % mp.__version__) import matplotlib.pyplot as plt from matplotlib import cm %matplotlib inline from IPython.display import Image from IPython.core.display import HTML """ Explanation: Scientific If some of the scientific packages are missing, please install them using: ``` pip install jupyter pandas numpy matplotlib ``` End of explanation """ import ck.kernel as ck print ('CK version: %s' % ck.__version__) """ Explanation: Collective Knowledge If CK is not installed, please install it using: ``` pip install ck ``` End of explanation """ # Return the number of floating-point operations for C = alpha * A * B + beta * C, # where A is a MxK matrix and B is a KxN matrix. def xgemm_flops(alpha, beta, M, K, N): flops_AB = 2*M*N*K if alpha!=0 else 0 flops_C = 2*M*N if beta!=0 else 0 flops = flops_AB + flops_C return flops # Return GFLOPS (Giga floating-point operations per second) for a known kernel and -1 otherwise. 
def GFLOPS(kernel, run_characteristics, time_ms): if kernel.lower().find('xgemm') != -1: time_ms = np.float64(time_ms) alpha = np.float64(run_characteristics['arg_alpha']) beta = np.float64(run_characteristics['arg_beta']) M = np.int64(run_characteristics['arg_m']) K = np.int64(run_characteristics['arg_k']) N = np.int64(run_characteristics['arg_n']) return (1e-9 * xgemm_flops(alpha, beta, M, K, N)) / (1e-3 * time_ms) else: return (-1.0) def args_str(kernel, run): args = '' if kernel.lower().find('xgemm') != -1: args = 'alpha=%s, beta=%s, M=%s, K=%s, N=%s' % \ (run['arg_alpha'], run['arg_beta'], run['arg_m'], run['arg_k'], run['arg_n']) return args """ Explanation: Define helper functions End of explanation """ def get_experimental_results(repo_uoa='local', tags='explore-clblast-matrix-size'): module_uoa = 'experiment' r = ck.access({'action':'search', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'tags':tags}) if r['return']>0: print ("Error: %s" % r['error']) exit(1) experiments = r['lst'] dfs = [] for experiment in experiments: print experiment data_uoa = experiment['data_uoa'] r = ck.access({'action':'list_points', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa}) if r['return']>0: print ("Error: %s" % r['error']) exit(1) for point in r['points']: with open(os.path.join(r['path'], 'ckp-%s.0001.json' % point)) as point_file: point_data_raw = json.load(point_file) characteristics_list = point_data_raw['characteristics_list'] num_repetitions = len(characteristics_list) # Obtain column data. data = [ { 'repetition_id': repetition_id, 'strategy' : tuner_output['strategy'], 'config_id': config_id, 'config' : config['parameters'], 'kernel' : config['kernel'], 'args_id' : args_str(config['kernel'], characteristics['run']), 'ms' : np.float64(config['time']), 'GFLOPS' : GFLOPS(config['kernel'], characteristics['run'], config['time']) } for (repetition_id, characteristics) in zip(range(num_repetitions), characteristics_list) for tuner_output in characteristics['run']['data'] for (config_id, config) in zip(range(len(tuner_output['result'])), tuner_output['result']) ] # Construct a DataFrame. df = pd.DataFrame(data) # Set columns and index names. df.columns.name = 'characteristics' df.index.name = 'index' df = df.set_index([ 'kernel', 'strategy', 'args_id', 'config_id', 'repetition_id' ]) # Append to the list of similarly constructed DataFrames. dfs.append(df) # Concatenate all constructed DataFrames (i.e. stack on top of each other). result = pd.concat(dfs) return result.sortlevel(result.index.names) df = get_experimental_results(tags='explore-clblast-matrix-size,xgemm-fp32') pd.options.display.max_columns = len(df.columns) pd.options.display.max_rows = len(df.index) kernel0 = df.iloc[0].name[0] kernel0 # NB: Unlike mean(), mean() retains the 'config' column. 
df_kernel0 = df.groupby(level=df.index.names[:-1]).min().loc[kernel0]
df_kernel0.groupby(level=df_kernel0.index.names[:-1])['GFLOPS'].min()
df_kernel0.groupby(level=df_kernel0.index.names[:-1])['GFLOPS'].max()
max_GFLOPS = df_kernel0.loc[df_kernel0['GFLOPS'].argmax()]['GFLOPS']
max_GFLOPS
max_GFLOPS_config = df_kernel0.loc[df_kernel0['GFLOPS'].argmax()]['config']
max_GFLOPS_config
best_configs = df_kernel0.loc[df_kernel0.groupby(level=df_kernel0.index.names[:-1])['GFLOPS'].idxmax()]['config']
idx = df_kernel0.groupby(level=df_kernel0.index.names[:-1])['GFLOPS'].idxmax()
my = df_kernel0.loc[idx]['config']
for i in my:
    print(i)
"""
Explanation: Access the experimental data
End of explanation
"""
plt.figure(figsize=(12, 10))
sns.set_style('whitegrid'); sns.set_palette('Set1')
ax = sns.violinplot(data=df_kernel0.reset_index(), x='GFLOPS', y='args_id',
                    split=True, hue='strategy', hue_order=['random', 'exhaustive'])
ax.set_xticks(range(0, int(max_GFLOPS), 1))
ax.set_xlim([0, max_GFLOPS])
# Draw a dotted purple line from top to bottom at the default value (TODO).
ax.vlines(linestyles='dotted', colors='purple', x=124, ymin=ax.get_ylim()[0], ymax=ax.get_ylim()[1])
"""
Explanation: Plot a violin graph
End of explanation
"""
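To complement the violin plot, a compact numeric summary can help; the following sketch (not part of the original notebook) tabulates the best GFLOPS reached by each strategy for every problem size.

# Best GFLOPS per (args_id, strategy), shown as a small pivot table.
best = (df_kernel0.reset_index()
        .groupby(['args_id', 'strategy'])['GFLOPS']
        .max()
        .unstack('strategy'))
print(best)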
abevieiramota/data-science-cookbook
2017/05-naive-bayes/Naive_Bayes_Tutorial_01.ipynb
mit
import csv def loadCsv(filename): lines = csv.reader(open(filename, "r")) dataset = list(lines) for i in range(len(dataset)): dataset[i] = [float(x) for x in dataset[i]] return dataset """ Explanation: Naive Bayes Introdução Neste tutorial iremos apresentar a implentação do algoritmo Naive Bayes usando aplicado a dados numéricos. Utilizaremos neste tutorial o conjunto de dador denominado Pima Inidians Diabetes, utilizado para predizer início de diabetes veja neste link. Este problema é composto por 768 observações de detalhes médicos de pacientes indianas. Os registros descrevem as medidas instantâneas tomadas do paciente, como sua idade, o número de vezes grávidas e o tratamento do sangue. Todos os pacientes são mulheres com idade igual ou superior a 21 anos. Todos os atributos são numéricos, e suas unidades variam de atributo a atributo. Cada registro tem um valor de classe que indica se o paciente sofreu um início de diabetes dentro de 5 anos de quando as medidas foram tomadas (1) ou não (0). Este é um conjunto de dados padrão que tem sido estudado muito na literatura de aprendizagem de máquinas. Uma boa precisão de predição é de 70% a 76%. Passos do Tutorial Tratar Dados: carregar os dados do arquivo CSV e divida-o em treinamento e teste conjuntos de dados. Resumir dados: resumir as propriedades no conjunto de dados de treinamento para que possamos calcular probabilidades e fazer previsões. Faça uma Previsão: usar os resumos do conjunto de dados para gerar uma única previsão. Faça previsões: gerar previsões, dado um conjunto de dados de teste e um conjunto de dados de treinamento resumido. Avalie a precisão: avaliar a precisão das previsões feitas para um conjunto de dados de teste como a porcentagem correta de todas as previsões feitas. 1. Tratar Dados 1.1 Carregar arquivo A primeira coisa que precisamos fazer é carregar nosso arquivo de dados. Os dados estão no formato CSV sem linha de cabeçalho. Podemos abrir o arquivo com a função open e ler as linhas de dados usando a função de leitor no módulo csv. Também precisamos converter os atributos que foram carregados como strings em números para que possamos trabalhar com eles. Abaixo está a função loadCsv () para carregar o conjunto de dados Pima indians. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 1 Teste esta função carregando o dataset pima-indians-diabetes.data e imprime o número de instancias carregadas da seguinte forma "Arquivo carregado pima-indians-diabetes.data com XXX linhas" End of explanation """ import random def splitDataset(dataset, splitRatio): trainSize = int(len(dataset) * splitRatio) trainSet = [] copy = list(dataset) while len(trainSet) < trainSize: index = random.randrange(len(copy)) trainSet.append(copy.pop(index)) return [trainSet, copy] """ Explanation: 1.2 Dividir Arquivo Em seguida, precisamos dividir os dados em um conjunto de dados de treinamento, o qual possa ser usado pelo Naive Bayes para fazer previsões e um conjunto de dados de teste para que possamos usar para avaliar a precisão do modelo. Precisamos dividir o conjunto de dados aleatoriamente em treino e teste, em conjuntos de dados com uma proporção de 67% de treinamento e 33% de teste (esta é uma razão comum para testar um algoritmo em um conjunto de dados). Abaixo está a função splitDataset () que dividirá um determinado conjunto de dados em uma proporção de divisão determinada. 
End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 2 Teste esta função definindo um dataset mockado com 5 instancias, divida este arquivo em trainamento e teste. Imprima os conjuntos de treinamento e teste gerados, por exemplo, imprimindo: "Dividiu arquivo com 5 linhas em arquivo de treino com [[2], [5], [4]] e de teste com [[1], [3]]" End of explanation """ def separateByClass(dataset): separated = {} for i in range(len(dataset)): vector = dataset[i] if (vector[-1] not in separated): separated[vector[-1]] = [] separated[vector[-1]].append(vector) return separated """ Explanation: 2. Sumarizar Dados O modelo do Naive Bayes é composto basicamente pela sumarização do conjunto de dados de treinamento. Este sumário é então usado ao fazer previsões. O resumo dos dados de treinamento coletados envolve a média e o desvio padrão para cada atributo, pelo valor da classe. Por exemplo, se houver dois valores de classe e 7 atributos numéricos, então precisamos de um desvio padrão e médio para cada combinação de atributo (7) e valor de classe (2), ou seja, 14 resumos de atributos. Estes são necessários ao fazer previsões para calcular a probabilidade de valores de atributos específicos pertencentes a cada valor de classe. Para sumarizar os dados criamos as seguintes subtarefas: Separar dados por classe Calcular Média Calcular o desvio padrão Conjunto de dados de resumo Resumir atributos por classe Separar dados por classe 2.1 Separar dados por classe A primeira tarefa é separar as instâncias do conjunto de dados de treinamento pelo valor da classe para que possamos calcular as estatísticas para cada classe. Podemos fazer isso criando um mapa de cada valor de classe para uma lista de instâncias que pertencem a essa classe e classificar todo o conjunto de dados de instâncias nas listas apropriadas. A função separadaByClass () abaixo faz isso. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 3 Teste este função com alguns exemplos de dados sintéticos e imprima as classes separadas com seus respectivas instancias. Perceba no exemplo acima que a classe se refere ao último elemento do vetor. Segue um exemplo de saída: "Instancias separadas por classes: {1: [[1, 20, 1], [3, 22, 1]], 0: [[2, 21, 0]]}" End of explanation """ import math def mean(numbers): return sum(numbers)/float(len(numbers)) def stdev(numbers): avg = mean(numbers) variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1) return math.sqrt(variance) """ Explanation: 2.2 Calcular Média e Desvio Padrão Precisamos calcular a média de cada atributo para um valor de classe. A média é a tendência central central ou central dos dados, e vamos usá-lo como meio de nossa distribuição gaussiana ao calcular probabilidades. Também precisamos calcular o desvio padrão de cada atributo para um valor de classe. O desvio padrão descreve a variação da disseminação dos dados, e vamos usá-lo para caracterizar a propagação esperada de cada atributo em nossa distribuição gaussiana ao calcular probabilidades. O desvio padrão é calculado como a raiz quadrada da variância. A variância é calculada como a média das diferenças quadradas para cada valor de atributo da média. Observe que estamos usando o método N-1, que subtrai 1 do número de valores de atributo ao calcular a variância. 
End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 4 Crie alguns dados fictícios e teste as funções criadas.Exemplo: "Cálculo de [1, 2, 3, 4, 5]: média=3.0, stdev=1.5811388300841898" End of explanation """ def summarize(dataset): summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)] del summaries[-1] return summaries """ Explanation: 2.2 Sumarizar os dados Agora temos as ferramentas para resumir um conjunto de dados. Para uma determinada lista de instâncias (para um valor de classe), podemos calcular a média e o desvio padrão para cada atributo. A função zip agrupa os valores de cada atributo em nossas instâncias de dados em suas próprias listas para que possamos calcular os valores de desvio padrão e média para o atributo. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 5 Crie alguns dados fictícios e teste as funções criadas.Exemplo de saída: "Sumário dos atributos: [(2.0, 1.0), (21.0, 1.0)]" End of explanation """ def summarizeByClass(dataset): separated = separateByClass(dataset) summaries = {} for classValue, instances in separated.items(): summaries[classValue] = summarize(instances) return summaries """ Explanation: 2.3 Sumarizar Atributos por classes Podemos juntar tudo ao separar nosso conjunto de dados de treinamento em instâncias agrupadas por classe, usando a função summarizeByClass() End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 6 Teste a função acima, usando um pequeno conjunto de dados. Exemplo de saída: Resumo por classe: {1: [(2.0, 1.4142135623730951), (21.0, 1.4142135623730951)], 0: [(3.0, 1.4142135623730951), (21.5, 0.7071067811865476)]} End of explanation """ def calculateProbability(x, mean, stdev): exponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2)))) return (1 / (math.sqrt(2*math.pi) * math.pow(stdev, 2))) * exponent """ Explanation: 3. Realizar as Predição Agora estamos prontos para fazer previsões usando os resumos preparados a partir dos nossos dados de treinamento. As previsões envolvem o cálculo da probabilidade de uma dada instância de dados pertencer a cada classe e a seleção da classe com a maior probabilidade de previsão. Podemos dividir essa parte nas seguintes tarefas: Calcular a função de densidade de probabilidade gaussiana Calcular probabilidades das classes Fazer uma previsão Fazer várias previsões Obter acurácia 3.1 Calcular a função de densidade de probabilidade gaussiana Podemos usar uma função gaussiana para estimar a probabilidade de um determinado valor de atributo, dada a média conhecida e o desvio padrão para o atributo estimado a partir dos dados de treinamento. Dado que os resumos de atributos para cada atributo e valor de classe, o resultado é a probabilidade condicional de um determinado valor de atributo dado um valor de classe. Veja as referências para os detalhes desta equação para a função de densidade de probabilidade gaussiana. Em resumo, estamos conectando nossos detalhes conhecidos ao Gauss (valor do atributo, média e desvio padrão) e recuperando a probabilidade de que nosso valor de atributo pertença à classe. Na função calcularProbability (), calculamos o expoente primeiro, depois calculamos a divisão principal. Isso nos permite ajustar a equação bem em duas linhas. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 7 Teste a função acima, usando um pequeno conjunto de dados. 
Exemplo de saída: "Probabilidade para pertencer a esta classe: 0.06248965759370005" End of explanation """ def calculateClassProbabilities(summaries, inputVector): probabilities = {} for classValue, classSummaries in summaries.items(): probabilities[classValue] = 1 for i in range(len(classSummaries)): mean, stdev = classSummaries[i] x = inputVector[i] probabilities[classValue] *= calculateProbability(x, mean, stdev) return probabilities """ Explanation: 3.2 Calcular probabilidades das classes Neste momento, podemos calcular a probabilidade de um atributo pertencente a uma classe, podemos combinar as probabilidades de todos os valores dos atributos para uma instância de dados e apresentar uma probabilidade de toda a instância de dados pertencente à classe. Combinamos as probabilidades juntas, multiplicando-as. Na função calculateClassProbabilities () abaixo, a probabilidade de uma determinada instância de dados é calculada multiplicando as probabilidades de atributo para cada classe. O resultado é um mapa de valores de classe para probabilidades. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 8 Teste a função acima, usando um pequeno conjunto de dados. Exemplo de saída: "Probabilidades para cada classe: {0: 0.7820853879509118, 1: 6.298736258150442e-05}" End of explanation """ def predict(summaries, inputVector): probabilities = calculateClassProbabilities(summaries, inputVector) bestLabel, bestProb = None, -1 for classValue, probability in probabilities.items(): if bestLabel is None or probability > bestProb: bestProb = probability bestLabel = classValue return bestLabel """ Explanation: 3.3 Fazer a Predição Agora podemos calcular a probabilidade de uma instância de dados pertencente a cada valor de classe, podemos procurar a maior probabilidade e retornar a classe associada. A função predict() realiza esta tarefa. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 9 Teste a função acima, usando um pequeno conjunto de dados. Exemplo de saída: "Entrada {'A': [(1, 0.5)], 'B': [(20, 5.0)]} Consulta [1.1, '?'] Predição: Classe A" End of explanation """ def getPredictions(summaries, testSet): predictions = [] for i in range(len(testSet)): result = predict(summaries, testSet[i]) predictions.append(result) return predictions """ Explanation: 3.4 Fazer várias predições Finalmente, podemos estimar a precisão do modelo fazendo previsões para cada instância de dados em nosso conjunto de dados de teste. A função getPredictions () realizará esta tarefa e retornará uma lista de previsões para cada instância de teste. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 10 Teste a função acima, usando um pequeno conjunto de dados. Exemplo de saída: "Predições: Sumarios {'A': [(1, 0.5)], 'B': [(20, 5.0)]} Teste [[1.1, '?'], [19.1, '?']] Classes Preditas['A', 'B']" End of explanation """ def getAccuracy(testSet, predictions): correct = 0 for i in range(len(testSet)): if testSet[i][-1] == predictions[i]: correct += 1 return (correct/float(len(testSet))) * 100.0 """ Explanation: 3.5 Calcular Acurácia As previsões podem ser comparadas com os valores de classe no conjunto de dados de teste. A acurácia da classificação pode ser calculada como uma relação de precisão entre 0 e 100%. A função getAccuracy () calculará essa relação de precisão. End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 11 Teste a função acima, usando um pequeno conjunto de dados. 
Exemplo de saída: "Resultado: Teste [[1, 1, 1, 'a'], [2, 2, 2, 'a'], [3, 3, 3, 'b']] predições ['a', 'a', 'a'] Acurácia 66.66666666666666" End of explanation """ ### COLOQUE SUA RESPOSTA AQUI """ Explanation: Exercicio 12 Junte todo o código acima, crie uma função main e execute a predição para o dataset "pima-indians-diabetes.data", observando a acurácia obtida. Execute várias vezes e analise a variação da acurária ao longo dessas execuções. End of explanation """
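For reference, one possible shape for that final step is sketched below. It assumes the helper functions from the earlier sections of this notebook (separateByClass, mean, stdev and the functions defined above) are already in scope, reads and splits the CSV inline rather than relying on any particular loading helper, and uses a 0.67 split ratio simply as a common choice.

import csv
import random

def main():
    filename = 'pima-indians-diabetes.data'
    split_ratio = 0.67

    # load the CSV and convert every value to float
    with open(filename) as handle:
        dataset = [[float(value) for value in row] for row in csv.reader(handle) if row]

    # random train/test split
    random.shuffle(dataset)
    cut = int(len(dataset) * split_ratio)
    train_set, test_set = dataset[:cut], dataset[cut:]

    # fit, predict and report accuracy
    summaries = summarizeByClass(train_set)
    predictions = getPredictions(summaries, test_set)
    accuracy = getAccuracy(test_set, predictions)
    print('Acurácia: {0}%'.format(accuracy))

main()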
rebeccabilbro/rebeccabilbro.github.io
_drafts/mushroom_tutorial_reboot.ipynb
mit
from yellowbrick.datasets import load_mushroom X, y = load_mushroom() print(X[:5]) # inspect the first five rows """ Explanation: Model Selection Tutorial with Yellowbrick In this tutorial, we are going to look at scores for a variety of scikit-learn models and compare them using visual diagnostic tools from Yellowbrick in order to select the best model for our data. The Model Selection Triple Discussions of machine learning are frequently characterized by a singular focus on model selection. Be it logistic regression, random forests, Bayesian methods, or artificial neural networks, machine learning practitioners are often quick to express their preference. The reason for this is mostly historical. Though modern third-party machine learning libraries have made the deployment of multiple models appear nearly trivial, traditionally the application and tuning of even one of these algorithms required many years of study. As a result, machine learning practitioners tended to have strong preferences for particular (and likely more familiar) models over others. However, model selection is a bit more nuanced than simply picking the "right" or "wrong" algorithm. In practice, the workflow includes: selecting and/or engineering the smallest and most predictive feature set choosing a set of algorithms from a model family, and tuning the algorithm hyperparameters to optimize performance. The model selection triple was first described in a 2015 SIGMOD paper by Kumar et al. In their paper, which concerns the development of next-generation database systems built to anticipate predictive modeling, the authors cogently express that such systems are badly needed due to the highly experimental nature of machine learning in practice. "Model selection," they explain, "is iterative and exploratory because the space of [model selection triples] is usually infinite, and it is generally impossible for analysts to know a priori which [combination] will yield satisfactory accuracy and/or insights." Recently, much of this workflow has been automated through grid search methods, standardized APIs, and GUI-based applications. In practice, however, human intuition and guidance can more effectively hone in on quality models than exhaustive search. By visualizing the model selection process, data scientists can steer towards final, explainable models and avoid pitfalls and traps. The Yellowbrick library is a diagnostic visualization platform for machine learning that allows data scientists to steer the model selection process. Yellowbrick extends the scikit-learn API with a new core object: the Visualizer. Visualizers allow visual models to be fit and transformed as part of the scikit-learn Pipeline process, providing visual diagnostics throughout the transformation of high dimensional data. About the Data This tutorial uses the mushrooms data from the Yellowbrick datasets module. NOTE: The YB version of the mushrooms data differs from the mushroom dataset from the UCI Machine Learning Repository. The Yellowbrick version has been deliberately modified to make modeling a bit more of a challenge. The data include descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species was identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended (this latter class was combined with the poisonous one). 
Our file, "agaricus-lepiota.txt," contains information for 3 nominally valued attributes and a target value from 8124 instances of mushrooms (4208 edible, 3916 poisonous). Let's load the data: End of explanation """ from sklearn.metrics import f1_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, LabelEncoder def score_model(X, y, estimator, **kwargs): """ Test various estimators. """ y = LabelEncoder().fit_transform(y) model = Pipeline([ ('one_hot_encoder', OneHotEncoder()), ('estimator', estimator) ]) # Instantiate the classification model and visualizer model.fit(X, y, **kwargs) expected = y predicted = model.predict(X) # Compute and return F1 (harmonic mean of precision and recall) print("{}: {}".format(estimator.__class__.__name__, f1_score(expected, predicted))) # Try them all! from sklearn.svm import LinearSVC, NuSVC, SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegressionCV, LogisticRegression, SGDClassifier from sklearn.ensemble import BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier models = [ SVC(gamma='auto'), NuSVC(gamma='auto'), LinearSVC(), SGDClassifier(max_iter=100, tol=1e-3), KNeighborsClassifier(), LogisticRegression(solver='lbfgs'), LogisticRegressionCV(cv=3), BaggingClassifier(), ExtraTreesClassifier(n_estimators=100), RandomForestClassifier(n_estimators=100) ] for model in models: score_model(X, y, model) """ Explanation: Feature Extraction Our data, including the target, is categorical. We will need to change these values to numeric ones for machine learning. In order to extract this from the dataset, we'll have to use scikit-learn transformers to transform our input dataset into something that can be fit to a model. Luckily, scikit-learn does provide transformers for converting categorical labels into numeric integers: sklearn.preprocessing.LabelEncoder and sklearn.preprocessing.OneHotEncoder. We'll use a combination of scikit-learn's Pipeline object (here's great post on using pipelines by Zac Stewart), OneHotEncoder, and LabelEncoder: ```python from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, LabelEncoder y = LabelEncoder().fit_transform(y) # Label-encode targets before modeling model = Pipeline([ ('one_hot_encoder', OneHotEncoder()), # One-hot encode columns before modeling ('estimator', estimator) ]) ``` Modeling and Evaluation Common metrics for evaluating classifiers Precision is the number of correct positive results divided by the number of all positive results (e.g. How many of the mushrooms we predicted would be edible actually were?). Recall is the number of correct positive results divided by the number of positive results that should have been returned (e.g. How many of the mushrooms that were poisonous did we accurately predict were poisonous?). The F1 score is a measure of a test's accuracy. It considers both the precision and the recall of the test to compute the score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst at 0. precision = true positives / (true positives + false positives) recall = true positives / (false negatives + true positives) F1 score = 2 * ((precision * recall) / (precision + recall)) Now we're ready to make some predictions! 
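Before wiring this up for all of the models, here is a small hand-worked check of those three definitions, using made-up counts rather than anything computed from the mushroom data:

# hypothetical counts for a single classifier
true_positives, false_positives, false_negatives = 80.0, 20.0, 10.0

precision = true_positives / (true_positives + false_positives)   # 0.8
recall = true_positives / (true_positives + false_negatives)      # ~0.889
f1 = 2 * (precision * recall) / (precision + recall)              # ~0.842
print(precision, recall, f1)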
Let's build a way to evaluate multiple estimators &mdash; first using traditional numeric scores (which we'll later compare to some visual diagnostics from the Yellowbrick library). End of explanation """ from sklearn.pipeline import Pipeline from yellowbrick.classifier import ClassificationReport def visualize_model(X, y, estimator): """ Test various estimators. """ y = LabelEncoder().fit_transform(y) model = Pipeline([ ('one_hot_encoder', OneHotEncoder()), ('estimator', estimator) ]) # Instantiate the classification model and visualizer visualizer = ClassificationReport( model, classes=['edible', 'poisonous'], cmap="Reds", size=(600, 360) ) visualizer.fit(X, y) visualizer.score(X, y) visualizer.poof() for model in models: visualize_model(X, y, model) """ Explanation: Preliminary Model Evaluation Based on the results from the F1 scores above, which model is performing the best? Visual Model Evaluation Now let's refactor our model evaluation function to use Yellowbrick's ClassificationReport class, a model visualizer that displays the precision, recall, and F1 scores. This visual model analysis tool integrates numerical scores as well color-coded heatmap in order to support easy interpretation and detection, particularly the nuances of Type I and Type II error, which are very relevant (lifesaving, even) to our use case! Type I error (or a "false positive") is detecting an effect that is not present (e.g. determining a mushroom is poisonous when it is in fact edible). Type II error (or a "false negative") is failing to detect an effect that is present (e.g. believing a mushroom is edible when it is in fact poisonous). End of explanation """
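To make the Type I / Type II framing above concrete, it can also help to look at raw error counts for a single model. A sketch reusing the same encoding steps as before (the choice of RandomForestClassifier here is arbitrary):

from sklearn.metrics import confusion_matrix

y_enc = LabelEncoder().fit_transform(y)   # edible -> 0, poisonous -> 1
model = Pipeline([
    ('one_hot_encoder', OneHotEncoder()),
    ('estimator', RandomForestClassifier(n_estimators=100))
])
model.fit(X, y_enc)

tn, fp, fn, tp = confusion_matrix(y_enc, model.predict(X)).ravel()
print("false positives (edible flagged as poisonous):", fp)
print("false negatives (poisonous flagged as edible):", fn)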
reece/ga4gh-examples
nb/Search VariantAnnotations using SO term sets.ipynb
apache-2.0
import itertools import pprint import re from IPython.display import HTML, display import ga4gh.client import prettytable import requests print(ga4gh.__version__) gc = ga4gh.client.HttpClient("http://localhost:8000") region_constraints = dict(referenceName="1", start=0, end=int(1e10)) variant_set_id = 'YnJjYTE6T1I0Rg' variant_annotation_sets = list(gc.searchVariantAnnotationSets(variant_set_id)) variant_annotation_set = variant_annotation_sets[0] print("Using first variant annotation set (of {n} total) for variant set {vs_id}\nvas_id={vas.id}".format( n=len(variant_annotation_sets), vs_id=variant_set_id, vas=variant_annotation_set)) # poor-man's SO name-to-id map # so_name_id_map will look like this: # {u'natural_variant_site': u'SO:0001147', # u'polypeptide_zinc_ion_contact_site': u'SO:0001103', # u'methylated_adenine': u'SO:0000161', # ... id_name_re = re.compile("id: (?P<id>SO:\d+)\nname: (?P<name>\S+)") url = "https://raw.githubusercontent.com/The-Sequence-Ontology/SO-Ontologies/master/so-xp-simple.obo" so_name_id_map = { m.group('name'): m.group('id') for m in (id_name_re.search(s) for s in requests.get(url).text.split("\n\n")) if m is not None } def mk_effect_id_filter(so_ids=[]): """return list of OntologyTerm effect filters for the given list of so ids >>> print(_mk_effect_id_filter("SO:123 SO:456".split())) [{'id': 'SO:123'}, {'id': 'SO:456'}] """ return [{"id": id} for id in so_ids] def remap_with_so_ids(so_name_map): """For a map of label => [so names], return a map of label => [so ids]""" def _map1(n): try: return so_name_id_map[n] except KeyError: print("SO term name '{n}' is not (currently) valid".format(n=n)) return {label: filter(None, (_map1(n) for n in names)) for label, names in so_name_map.items()} """ Explanation: Search VariantAnnotations using SO term sets This code demonstrates how to use named sets of SO terms to annotate variants or search for variants. Possible uses include classififcation of variation by "impact" (e.g., à la SnpEff) and classification variation by region. Important definitions: An SO "term" refers to a concept. The proper primary key for a SO term is a SO "id" (e.g., SO:0001147). Each term also has a "name" (natural_variant_site), "definition" ("Describes the natural sequence variants due to polymorphisms..."), and other fields. Names may change (and have changed) for a given SO id; therefore, developers should use SO ids internally. This notebook generates SO maps that consists of a set of annotation labels (as map keys) mapped to a set of SO terms. Each map is named. For example, a SnpEff map might have labels "high", "moderate", and "low", with each label mapping to a set of SO terms. 
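As a tiny illustration of the two helpers defined above (the names are real SO term names, but the exact ids returned depend on the ontology file downloaded at runtime):

toy_map = {"coding": ["missense_variant", "synonymous_variant"]}
toy_ids = remap_with_so_ids(toy_map)
print(toy_ids)                                 # e.g. {'coding': ['SO:0001583', 'SO:0001819']}
print(mk_effect_id_filter(toy_ids["coding"]))  # e.g. [{'id': 'SO:0001583'}, {'id': 'SO:0001819'}]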
The results here may be compared with the survey of terms from the same region in Exploring SO terms End of explanation """ snpeff_so_name_map = { "high": [ "chromosome_large_deletion", "chromosome_large_inversion", "chromosome_large_duplication", "gene_rearrangement", "gene_deleted", "gene_fusion", "gene_fusion_reverese", "transcript_deleted", "exon_deleted", "exon_deleted_partial", "exon_duplication", "exon_duplication_partial", "exon_inversion", "exon_inversion_partial", "frame_shift", "stop_gained", "stop_lost", "start_lost", "splice_site_acceptor", "splice_site_donor", "rare_amino_acid", "protein_protein_interaction_locus", "protein_structural_interaction_locus", ], "moderate": [ "non_synonymous_coding", "codon_insertion", "codon_change_plus_codon_insertion", "codon_deletion", "codon_change_plus_codon_deletion", "utr_5_deleted", "utr_3_deleted", "splice_site_branch_u12", "splice_site_region", "splice_site_branch", "non_synonymous_stop", "non_synonymous_start", "synonymous_coding", "synonymous_start", "synonymous_stop", "codon_change", ], "low": [ "gene_inversion", "gene_duplication", "transcript_duplication", "transcript_inversion", "utr_5_prime", "utr_3_prime", "start_gained", "upstream", "downstream", "motif", "motif_deleted", "regulation", "micro_rna", ], "modifiers": [ "custom", "next_prot", "intron_conserved", "intron", "intragenic", "intergenic_conserved", "intergenic", "cds", "exon", "transcript", "gene", "sequence", "chromosome_elongation", "chromosome", "genome", "none", ] } snpeff_so_id_map = remap_with_so_ids(snpeff_so_name_map) snpeff_so_id_map """ Explanation: SnpEff map classifies SO terms by predicted impact derived from https://github.com/pcingola/SnpEff/blob/18ad192f751d2e34595949dda8b25c295c8a9ca1/src/main/java/org/snpeff/snpEffect/EffectType.java End of explanation """ region_so_name_map = { "locus": [ "gene_fusion", "upstream_gene_variant", ], "cds": [ "missense_variant", "start_lost", "stop_gained", "stop_lost", "synonymous_variant", ], # note that utr, upstream, and downstream sets overlap intentionally "utr": [ "3_prime_UTR_variant", "5_prime_UTR_variant", ], "upstream": [ "5_prime_UTR_variant", "upstream_gene_variant", ], "downstream": [ "3_prime_UTR_variant", "downstream_gene_variant", ], } region_so_id_map = remap_with_so_ids(region_so_name_map) """ Explanation: Region name map This is really just a contrived example of another kind of SO annotation that someone might find useful. End of explanation """ so_maps = {"snpeff": snpeff_so_id_map, "region": region_so_id_map} pprint.pprint(so_maps) """ Explanation: Meta maps so_maps contains both maps End of explanation """ field_names = "n_vars name:label n_so_ids so_ids".split() pt = prettytable.PrettyTable(field_names=field_names) for name, so_map in so_maps.items(): for label, so_ids in so_map.items(): vs = [] # Searching with an empty filter means no filtering # This should be changed: searching should be by inclusion, not lack of exclusion. if len(so_ids)>0: efilter = mk_effect_id_filter(so_ids) vs = list(gc.searchVariantAnnotations(variant_annotation_set.id, effects=efilter, **region_constraints)) pt.add_row([ len(vs), name + ":" + label, len(so_ids), " ".join(so_ids) ]) display(HTML(pt.get_html_string())) """ Explanation: Search for variants by each SO map End of explanation """ # invert the SO map (name: {SO: label}) def invert_so_map(so_map): """for a so_map of {label: [so_id]}, return the inverse {so_id: [labels]}. 
so_id:label is many:many """ lmap = sorted((so, label) for label, so_ids in so_map.items() for so in so_ids) return {k: list(sl[1] for sl in sli) for k, sli in itertools.groupby(lmap, key=lambda e: e[0])} def unique_labels_for_so_ids(so_labels_map, so_ids): """given a map of {so: [labels]} and a list of so_ids, return a list of unique labels""" uniq_labels = set(itertools.chain.from_iterable(so_labels_map.get(so_id, []) for so_id in so_ids)) return list(uniq_labels) def build_variant_record(v): so_ids = list(set(eff.id for te in v.transcriptEffects for eff in te.effects)) impacts = unique_labels_for_so_ids(so_labels_maps["snpeff"], so_ids) regions = unique_labels_for_so_ids(so_labels_maps["region"], so_ids) return dict( g = v.transcriptEffects[0].hgvsAnnotation.genomic, t = v.transcriptEffects[0].hgvsAnnotation.transcript, p = v.transcriptEffects[0].hgvsAnnotation.protein, so_ids = " ".join(so_ids), impacts = " ".join(impacts), regions = " ".join(regions) ) so_labels_maps = {name: invert_so_map(so_map) for name, so_map in so_maps.items()} pprint.pprint(so_labels_maps) variants = list(gc.searchVariantAnnotations( variant_annotation_set.id, effects = mk_effect_id_filter("SO:0001587 SO:0001819".split()), **region_constraints)) field_names = "g t p so_ids impacts regions".split() pt = prettytable.PrettyTable(field_names=field_names) for v in variants: vrec = build_variant_record(v) pt.add_row([vrec[k] for k in field_names]) display(HTML(pt.get_html_string())) """ Explanation: Label variants with SO maps This is essentially the inverse of the above End of explanation """
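A toy check of the inversion and label lookup, using made-up ids, may make the many-to-many behaviour easier to see:

toy = {"red": ["SO:1", "SO:2"], "blue": ["SO:2"]}
inverted = invert_so_map(toy)
print(inverted)   # {'SO:1': ['red'], 'SO:2': ['blue', 'red']}
print(unique_labels_for_so_ids(inverted, ["SO:2", "SO:999"]))   # labels for SO:2; unknown ids are ignored (set order is arbitrary)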
Vvkmnn/books
AutomateTheBoringStuffWithPython/lesson39.ipynb
gpl-3.0
# Test the requests module by importing it import requests # Store a website url in a response object that can be queried res = requests.get('https://automatetheboringstuff.com/files/rj.txt') """ Explanation: Lesson 39: Downloading from the Web with the Requests Module The requests module lets you easily download files from the web without complicated issues. requests does not come with Python, so it must be installed manually with pip. End of explanation """ res.status_code """ Explanation: Response objects can be checked via status codes: '404' is the typical 'file not found' code. '200' is the typical 'success' code. End of explanation """ # Print the first 100 lines print(res.text[:1000]) """ Explanation: The response object has succeded, and all values are stored within it: End of explanation """ # Run method on existing response object; won't raise anything because no error res.raise_for_status() # An example bad request badres = requests.get('https://automatetheboringstuff.com/134513135465614561456') badres.raise_for_status() """ Explanation: A typical way to deal with status is to use a raise_for_status() statement, which will crash if a file is not found, and can be used in conjunction with boolean statements, and try and except statements. End of explanation """ # Open/create a file to store the bytes, using a new name playFile= open('files/RomeoAnd Juliet.txt', 'wb') # Iteratively write each 100,000 byte 'chunk' of data into this file for chunk in res.iter_content(100000): playFile.write(chunk) # Close to save file playFile.close() """ Explanation: Files downloaded in this way must be stored in wb or write-binary method, to preserve the unicode formatting of this text. An explanation of unicode and its relationship to Python can be found here. To store this file, we therefore need to write it in 'byte' chunks to a binary file. A useful method to help do this is the response object's iter_content method. End of explanation """
zach-hartwig/IPyLogbook
mgmt/IPyLogbookExtensions.ipynb
gpl-3.0
# Enable Python variables to by inserted into Markdown cells via the "{{}}" syntax use_python_markdown = True # Enable cells to be 'read-only' via 'lock' click button up above-right use_read_only = True # Enable all input cells to be hidden via ' bars'click button above-right use_hide_input_all = True # Enable png/jpg images to be added to notebook by drag-and-drop use_drag_and_drop = True """ Explanation: IPyLogbook Extensions This IPython notebook can be used to configure the IPython-notebook-extensions that are used to enhance the IPyLogbook system. The user can choose which extensions will be loaded automatically when the notebook server is started. A customized version of the IPython-notebook-extensions should be installed in the .ipython/nbextensions directory in order to be used. A custom version is primarily being used to correct bugs found in the official GitHub development version. The user should be warned that the python-markdown extension is crucial to ensure that URLs with the Markdown cells are correctly set! To configure extensions: 1. Set each of the extension flags in the following cell to 'True' (enabled) or 'None' (disabled) 2. Run all cells via the Cell $\rightarrow$ Run All menu option 3. Save the notebook via the File $\rightarrow$ Save and Checkpoint menu option or the Ctrl-s key binding 4. Restart the IPython notebook server for changes to take affect End of explanation """ import IPython from IPython.html.services.config import ConfigManager ip=IPython.get_ipython() ip.ipython_dir extensions_dir = "IPython-notebook-extensions-master/" python_markdown = extensions_dir + "usability/python-markdown" drag_and_drop = extensions_dir + "usability/dragdrop/drag-and-drop" read_only = extensions_dir + "usability/read-only" hide_input_all = extensions_dir + "usability/hide_input_all" cm = ConfigManager(parent=ip, profile_dir=ip.profile_dir.location) cm.update('notebook', {"load_extensions": {python_markdown: use_python_markdown}}) cm.update('notebook', {"load_extensions": {drag_and_drop: use_drag_and_drop}}) cm.update('notebook', {"load_extensions": {read_only: use_read_only}}) cm.update('notebook', {"load_extensions": {hide_input_all: use_hide_input_all}}) """ Explanation: Load IPython-notebook-extensions End of explanation """ import IPython from IPython.html.services.config import ConfigManager from IPython.html.services.config import ConfigManager from IPython.display import HTML cm = ConfigManager(parent=ip, profile_dir=ip.profile_dir.location) extensions = cm.get('notebook') table = "" for ext in extensions['load_extensions']: table += "<tr><td>%s</td>\n" % (ext) top = """ <table border="1"> <tr> <th>The following extensions will be automatically loaded by IPyLogbook:</th> </tr> """ bottom = """ </table> """ HTML(top + table + bottom) """ Explanation: List IPython-notebook-extensions End of explanation """
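After restarting the notebook server, one quick way to double-check which extensions actually ended up enabled is to read the same config section back, reusing the cm instance and path variables defined above (a sketch):

enabled = cm.get('notebook').get('load_extensions', {})
for path in (python_markdown, drag_and_drop, read_only, hide_input_all):
    print('{0:60s} {1}'.format(path, enabled.get(path)))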
rflamary/POT
docs/source/auto_examples/plot_otda_color_images.ipynb
mit
# Authors: Remi Flamary <remi.flamary@unice.fr> # Stanislas Chambon <stan.chambon@gmail.com> # # License: MIT License import numpy as np from scipy import ndimage import matplotlib.pylab as pl import ot r = np.random.RandomState(42) def im2mat(I): """Converts an image to matrix (one pixel per line)""" return I.reshape((I.shape[0] * I.shape[1], I.shape[2])) def mat2im(X, shape): """Converts back a matrix to an image""" return X.reshape(shape) def minmax(I): return np.clip(I, 0, 1) """ Explanation: OT for image color adaptation This example presents a way of transferring colors between two images with Optimal Transport as introduced in [6] [6] Ferradans, S., Papadakis, N., Peyre, G., & Aujol, J. F. (2014). Regularized discrete optimal transport. SIAM Journal on Imaging Sciences, 7(3), 1853-1882. End of explanation """ # Loading images I1 = ndimage.imread('../data/ocean_day.jpg').astype(np.float64) / 256 I2 = ndimage.imread('../data/ocean_sunset.jpg').astype(np.float64) / 256 X1 = im2mat(I1) X2 = im2mat(I2) # training samples nb = 1000 idx1 = r.randint(X1.shape[0], size=(nb,)) idx2 = r.randint(X2.shape[0], size=(nb,)) Xs = X1[idx1, :] Xt = X2[idx2, :] """ Explanation: Generate data End of explanation """ pl.figure(1, figsize=(6.4, 3)) pl.subplot(1, 2, 1) pl.imshow(I1) pl.axis('off') pl.title('Image 1') pl.subplot(1, 2, 2) pl.imshow(I2) pl.axis('off') pl.title('Image 2') """ Explanation: Plot original image End of explanation """ pl.figure(2, figsize=(6.4, 3)) pl.subplot(1, 2, 1) pl.scatter(Xs[:, 0], Xs[:, 2], c=Xs) pl.axis([0, 1, 0, 1]) pl.xlabel('Red') pl.ylabel('Blue') pl.title('Image 1') pl.subplot(1, 2, 2) pl.scatter(Xt[:, 0], Xt[:, 2], c=Xt) pl.axis([0, 1, 0, 1]) pl.xlabel('Red') pl.ylabel('Blue') pl.title('Image 2') pl.tight_layout() """ Explanation: Scatter plot of colors End of explanation """ # EMDTransport ot_emd = ot.da.EMDTransport() ot_emd.fit(Xs=Xs, Xt=Xt) # SinkhornTransport ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1) ot_sinkhorn.fit(Xs=Xs, Xt=Xt) # prediction between images (using out of sample prediction as in [6]) transp_Xs_emd = ot_emd.transform(Xs=X1) transp_Xt_emd = ot_emd.inverse_transform(Xt=X2) transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1) transp_Xt_sinkhorn = ot_sinkhorn.inverse_transform(Xt=X2) I1t = minmax(mat2im(transp_Xs_emd, I1.shape)) I2t = minmax(mat2im(transp_Xt_emd, I2.shape)) I1te = minmax(mat2im(transp_Xs_sinkhorn, I1.shape)) I2te = minmax(mat2im(transp_Xt_sinkhorn, I2.shape)) """ Explanation: Instantiate the different transport algorithms and fit them End of explanation """ pl.figure(3, figsize=(8, 4)) pl.subplot(2, 3, 1) pl.imshow(I1) pl.axis('off') pl.title('Image 1') pl.subplot(2, 3, 2) pl.imshow(I1t) pl.axis('off') pl.title('Image 1 Adapt') pl.subplot(2, 3, 3) pl.imshow(I1te) pl.axis('off') pl.title('Image 1 Adapt (reg)') pl.subplot(2, 3, 4) pl.imshow(I2) pl.axis('off') pl.title('Image 2') pl.subplot(2, 3, 5) pl.imshow(I2t) pl.axis('off') pl.title('Image 2 Adapt') pl.subplot(2, 3, 6) pl.imshow(I2te) pl.axis('off') pl.title('Image 2 Adapt (reg)') pl.tight_layout() pl.show() """ Explanation: Plot new images End of explanation """
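Because the fitted transport objects expose transform / inverse_transform for out-of-sample points, the same colour mapping can be reused on another photo without refitting. A sketch (the file name below is just a placeholder):

# hypothetical extra image -- replace with a real path
I3 = ndimage.imread('../data/another_ocean_photo.jpg').astype(np.float64) / 256
X3 = im2mat(I3)

# push its colours towards the Image 2 palette with the already-fitted mapping
I3t = minmax(mat2im(ot_emd.transform(Xs=X3), I3.shape))

pl.figure(4, figsize=(6.4, 3))
pl.subplot(1, 2, 1)
pl.imshow(I3)
pl.axis('off')
pl.title('New image')
pl.subplot(1, 2, 2)
pl.imshow(I3t)
pl.axis('off')
pl.title('New image Adapt')
pl.show()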
ml-ensemble/ml-ensemble.github.io
info/_downloads/parallel.ipynb
mit
from mlens.parallel import ParallelProcessing, Job, Learner from mlens.index import FoldIndex from mlens.utils.dummy import OLS import numpy as np np.random.seed(2) X = np.arange(20).reshape(10, 2) y = np.random.rand(10) indexer = FoldIndex(folds=2) learner = Learner(estimator=OLS(), indexer=indexer, name='ols') manager = ParallelProcessing(n_jobs=-1) out = manager.map(learner, 'fit', X, y, return_preds=True) print(out) """ Explanation: .. currentmodule: mlens.parallel Parallel Mechanics ML-Ensemble is designed to provide an easy user interface. But it is also designed to be extremely flexible, all the wile providing maximum concurrency at minimal memory consumption. The lower-level API that builds the ensemble and manages the computations is constructed in as modular a fashion as possible. The low-level API introduces a computational graph-like environment that you can directly exploit to gain further control over your ensemble. In fact, building your ensemble through the low-level API is almost as straight forward as using the high-level API. In this tutorial, we will walk through the core :class:ParallelProcessing class. The purpose of the :class:ParallelProcessing class is to provide a streamlined interface for scheduling and allocating jobs in a nested sequence of tasks. The typical case is a sequence of :class:Layer instances where the output of one layer becomes the input to the next. While the layers must therefore be fitted sequentially, each layer should be fitted in parallel. We might be interested in propagating some of the features from one layer to the next, in which case we need to take care of the array allocation. ParallelProcessing API ^^^^^^^^^^^^^^^^^^^^^^ Basic map ¨¨¨¨¨¨¨¨¨ In the simplest case, we have a caller that has a set of tasks that needs to be evaluated in parallel. For instance, the caller might be a :class:Learner, with each task being a fit job for a given cross-validation fold. In this simple case, we want to perform an embarrassingly parallel for-loop of each fold, which we can achieve with the map method of the :class:ParallelProcessing class. End of explanation """ from mlens.parallel import Transformer, Pipeline from mlens.utils.dummy import Scale from sklearn.base import BaseEstimator, TransformerMixin def error_scorer(p, y): return np.abs(p - y) class Error(BaseEstimator, TransformerMixin): """Transformer that computes the errors of a base learners""" def __init__(self, scorer): self.scorer = scorer def fit(self, X, y): return self def transform(self, X, y): return self.scorer(X, y), y """ Explanation: Stacking a set of parallel jobs Suppose instead that we have a sequence of learners, where we want to fit each on the errors of the previous learner. We can achieve this by using stack method and a preprocessing pipeline for computing the errors. First, we need to construct a preprocessing class to transform the input, which will be the preceding learner's predictions, into errors. End of explanation """ tasks = [] for i in range(3): if i != 0: pipeline = Pipeline([('err', Error(error_scorer))], return_y=True) transformer = Transformer( estimator=pipeline, indexer=indexer, name='sc-%i' % (i + 1) ) tasks.append(transformer) learner = Learner( estimator=OLS(), preprocess='sc-%i' % (i+1) if i != 0 else None, indexer=indexer, name='ols-%i' % (i + 1) ) tasks.append(learner) """ Explanation: Now, we construct a sequence of tasks to compute, where the output of one task will be the input to the next. 
Hence, we want a sequence of the form [learner, transformer, ..., learner]: End of explanation """ out = manager.stack( tasks, 'fit', X, y, return_preds=True, split=False) print(out) """ Explanation: To fit the stack, we call the stack method on the manager, and since each learner must have access to their transformer, we set split=False (otherwise each task will have a separate sub-cache, sealing them off from each other). End of explanation """ out = manager.initialize( 'fit', X, y, None, return_preds=['ols-1', 'ols-3'], stack=True, split=False) """ Explanation: If we instead want to append these errors as features, we can simply alter our transformer to concatenate the errors to the original data. Alternatively, we can automate the process by instead using the :class:mlens.ensemble.Sequential API. Manual initialization and processing Under the hood, both map and stack first call initialize on the manager, followed by a call to process with some default arguments. For maximum control, we can manually do the initialization and processing step. When we initialize, an instance of :class:Job is created that collect arguments relevant for of the job as well as handles for data to be used. For instance, we can specify that we want the predictions of all layers, as opposed to just the final layer: End of explanation """ out = manager.process(tasks, out) print(out) """ Explanation: The initialize method primarily allocates memory of input data and puts it on the job instance. Not that if the input is a string pointing to data on disk, initialize will attempt to load the data into memory. If the backend of the manger is threading, keeping the data on the parent process is sufficient for workers to reach it. With multiprocessing as the backend, data will be memory-mapped to avoid serialization. The initialize method returns an out dictionary that specified what type of output we want when running the manager on the assigned job. To run the manager, we call process with out out pointer: End of explanation """ manager.clear() """ Explanation: The output now is a list of arrays, the first contains the same predictions as we got in the map call, the last is the equivalent to the predicitons we got in the stack call. Note that this functionality is available also in the stack and map calls. Memory management When running the manager, it will read and write to memory buffers. This is less of a concern when the threading backend is used, as data is kept in the parent process. But when data is loaded from file path, or when multiprocessing is used, we want to clean up after us. Thus, when we are through with the manager, it is important to call the clear method. This will however destroy any ephemeral data stored on the instance. End of explanation """ learner = Learner(estimator=OLS(), indexer=indexer) with ParallelProcessing() as mananger: manager.stack(learner, 'fit', X, y, split=False) out = manager.stack(learner, 'predict', X, split=False) """ Explanation: ..warning:: The clear method will remove any files in the specified path. If the path specified in the initialize call includes files other than those generated in the process call, these will ALSO be removed. ALWAYS use a clean temporary cache for processing jobs. To minimize the risk of forgetting this last step, the :class:ParallelProcessing class can be used as context manager, automatically cleaning up the cache when exiting the context: End of explanation """
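When the context manager is not convenient (for example inside a longer-lived service), the same cleanup guarantee can be spelled out with try/finally, using only the calls shown above:

manager = ParallelProcessing(n_jobs=-1)
try:
    learner = Learner(estimator=OLS(), indexer=indexer, name='ols')
    preds = manager.map(learner, 'fit', X, y, return_preds=True)
finally:
    manager.clear()   # always release the job cache, even if fitting raised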
jhaip/livedata-mqtt
notebooks/D3 MQTT 9DOF.ipynb
mit
from IPython.core.display import display, HTML from string import Template import pandas as pd import json, random %%javascript require.config({paths: {d3: "https://d3js.org/d3.v4.min"}}); require(['d3'], function(d3) { window.d3 = d3; }) html_template = Template(''' <svg id="graph-div"></div> <script> $js_text </script> ''') js_text_template = Template(''' var data = $data; var itemWidth = 50; var itemHeight = 50; var chart = d3.select('#graph-div') .attr('width', 800) .attr('height', itemHeight*4); function update() { var row = chart.selectAll('.item') .data(data); var row2 = row.enter() .append('g') .attr("class", "item"); row2.merge(row) .attr('transform', function(d, i){ return 'translate(' + i*(itemWidth+5) + ', ' + itemHeight + ')rotate(' + d + ', ' + itemWidth*0.5 + ', ' + itemHeight*0.5 + ')'; }); row2.append('rect') .attr("width", itemWidth) .attr("height", itemHeight) .attr("fill", "#9999FF") .attr("stroke", "#5555AA"); row2.append("text") .text("Hello"); } update(); ''') data = [0,10,20,30]; js_text = js_text_template.substitute({'data': json.dumps(data)}) HTML(html_template.substitute({'js_text': js_text})) """ Explanation: Step 1: Set up D3 area End of explanation """ interact(update_from_slider, x=widgets.IntSlider(min=0,max=len(data)-10,step=1,value=0)); """ Explanation: Only run the following cell after running everything else. It was only places here so the slider could be next to the visualization. End of explanation """ js_text_template_2 = Template(''' data = $data; update(); console.log("updating"); ''') data = [0,0,0,0,0,0] def update_graph(data): js_text = js_text_template_2.substitute({'data': json.dumps(data)}) display(HTML('<script>' + js_text + '</script>')) update_graph(data) """ Explanation: OK, the D3 area is set up Now we'll focus on live updating. A manual test first. End of explanation """ import paho.mqtt.client as mqtt n_to_save = 10 data = [0 for i in range(n_to_save)] # update_graph(data) def on_connect(client, userdata, flags, rc): print("Connected with result code "+str(rc)) client.subscribe("/outTopic") def on_message(client, userdata, msg): global data try: msg_json = json.loads(msg.payload) except: print("Error") print(msg.topic+" "+str(msg.payload)) return print(msg_json) if msg_json['type'] == "BINARY" and msg_json['label'] == "Or": # data.append((msg_json['X'], msg_json['Y'], msg_json['Z'])) data.append(msg_json['Z']) update_graph(data[-n_to_save:]) print(data[-6:]) client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message client.username_pw_set("zettlmtm", "VOUbRcmhjffA") client.connect("m11.cloudmqtt.com", 19280, 60) client.loop_start() # Make sure to call client.loop_stop() later client.loop_stop() print(data) from time import sleep for i in range(len(data)-10): update_graph(data[i:i+10]) sleep(0.2) """ Explanation: Step 2: Now use MQTT to update the graph Now for the fun stuff. Using the update_graph(data) function set up above. Each group of data, separated by "start" messages, is saved in the tests array. As new data comes in, the live data graph is updated to show data from the last test. All data is neatly saved inside the tests array for replotting and further analysis later. 
Using the CloudMQTT free Cat plan: * https://api.cloudmqtt.com/sso/cloudmqtt/console * haipjacob@gmail.com:paramour-sieve-paper * If the Websocket UI has trouble connecting, restart the instance End of explanation """ from __future__ import print_function from ipywidgets import interact, interactive, fixed import ipywidgets as widgets def update_from_slider(x): update_graph(data[x:x+10]) interact(update_from_slider, x=widgets.IntSlider(min=0,max=len(data)-10,step=1,value=0)); """ Explanation: Replaying old data! The code in the cell below replays the old data on the graph using the collected data. This could also be a Jupyter widget to allow scrubbing if I knew how that worked. End of explanation """
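For testing the pipeline without the 9DOF sensor attached, one can publish a few synthetic messages in the same JSON shape that on_message expects, to the same broker and topic as above (a sketch only):

import time

pub = mqtt.Client()
pub.username_pw_set("zettlmtm", "VOUbRcmhjffA")   # same CloudMQTT credentials as above
pub.connect("m11.cloudmqtt.com", 19280, 60)

for z in [0, 15, 30, 45, 60]:
    payload = json.dumps({"type": "BINARY", "label": "Or", "X": 0, "Y": 0, "Z": z})
    pub.publish("/outTopic", payload)
    time.sleep(0.2)

pub.disconnect()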
leizhipeng/ml
titanic_survival_exploration/titanic_survival_exploration.ipynb
gpl-3.0
# Import libraries necessary for this project import numpy as np import pandas as pd from IPython.display import display # Allows the use of display() for DataFrames # Import supplementary visualizations code visuals.py import visuals as vs # Pretty display for notebooks %matplotlib inline # Load the dataset in_file = 'titanic_data.csv' full_data = pd.read_csv(in_file) # Print the first few entries of the RMS Titanic data display(full_data.head()) """ Explanation: Machine Learning Engineer Nanodegree Introduction and Foundations Project: Titanic Survival Exploration In 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions. Tip: Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook. Getting Started To begin working with the RMS Titanic passenger data, we'll first need to import the functionality we need, and load our data into a pandas DataFrame. Run the code cell below to load our data and display the first few entries (passengers) for examination using the .head() function. Tip: You can run a code cell by clicking on the cell and using the keyboard shortcut Shift + Enter or Shift + Return. Alternatively, a code cell can be executed using the Play button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. Markdown allows you to write easy-to-read plain text that can be converted to HTML. End of explanation """ # Store the 'Survived' feature in a new variable and remove it from the dataset outcomes = full_data['Survived'] data = full_data.drop('Survived', axis = 1) # Show the new dataset with 'Survived' removed display(data.head()) """ Explanation: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship: - Survived: Outcome of survival (0 = No; 1 = Yes) - Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class) - Name: Name of passenger - Sex: Sex of the passenger - Age: Age of the passenger (Some entries contain NaN) - SibSp: Number of siblings and spouses of the passenger aboard - Parch: Number of parents and children of the passenger aboard - Ticket: Ticket number of the passenger - Fare: Fare paid by the passenger - Cabin Cabin number of the passenger (Some entries contain NaN) - Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton) Since we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. We will use these outcomes as our prediction targets. Run the code cell below to remove Survived as a feature of the dataset and store it in outcomes. End of explanation """ def accuracy_score(truth, pred): """ Returns accuracy score for input truth and predictions. 
""" # Ensure that the number of predictions matches number of outcomes if len(truth) == len(pred): # Calculate and return the accuracy as a percent return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100) else: return "Number of predictions does not match number of outcomes!" # Test the 'accuracy_score' function predictions = pd.Series(np.ones(5, dtype = int)) print accuracy_score(outcomes[:5], predictions) """ Explanation: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcomes[i]. To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers. Think: Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be? End of explanation """ def predictions_0(data): """ Model with no features. Always predicts a passenger did not survive. """ predictions = [] for _, passenger in data.iterrows(): # Predict the survival of 'passenger' predictions.append(0) # Return our predictions return pd.Series(predictions) # Make the predictions predictions = predictions_0(data) """ Explanation: Tip: If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off. Making Predictions If we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking. The predictions_0 function below will always predict that a passenger did not survive. End of explanation """ print accuracy_score(outcomes, predictions) """ Explanation: Question 1 Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived? Hint: Run the code cell below to see the accuracy of this prediction. End of explanation """ vs.survival_stats(data, outcomes, 'Sex') """ Explanation: Answer: Predictions have an accuracy of 61.62%. Let's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the visuals.py Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across. Run the code cell below to plot the survival outcomes of passengers based on their sex. End of explanation """ def predictions_1(data): """ Model with one feature: - Predict a passenger survived if they are female. 
""" predictions = [] for _, passenger in data.iterrows(): # Remove the 'pass' statement below # and write your prediction conditions here if passenger['Sex'] == "female": predictions.append(1) else: predictions.append(0) # Return our predictions return pd.Series(predictions) # Make the predictions predictions = predictions_1(data) """ Explanation: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive. Fill in the missing code below so that the function will make this prediction. Hint: You can access the values of each feature for a passenger like a dictionary. For example, passenger['Sex'] is the sex of the passenger. End of explanation """ print accuracy_score(outcomes, predictions) """ Explanation: Question 2 How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive? Hint: Run the code cell below to see the accuracy of this prediction. End of explanation """ vs.survival_stats(data, outcomes, 'Age', ["Sex == 'male'"]) """ Explanation: Answer: Predictions have an accuracy of 78.68%. Using just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included. Run the code cell below to plot the survival outcomes of male passengers based on their age. End of explanation """ def predictions_2(data): """ Model with two features: - Predict a passenger survived if they are female. - Predict a passenger survived if they are male and younger than 10. """ predictions = [] for _, passenger in data.iterrows(): # Remove the 'pass' statement below # and write your prediction conditions here if passenger['Sex'] == "female" or passenger['Age'] < 10: predictions.append(1) else: predictions.append(0) # Return our predictions return pd.Series(predictions) # Make the predictions predictions = predictions_2(data) """ Explanation: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive. Fill in the missing code below so that the function will make this prediction. Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_1. End of explanation """ print accuracy_score(outcomes, predictions) """ Explanation: Question 3 How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived? Hint: Run the code cell below to see the accuracy of this prediction. 
End of explanation """ vs.survival_stats(data, outcomes, 'Age', ["Sex == 'female'","SibSp == 3"]) """ Explanation: Answer: Predictions have an accuracy of 79.35%. Adding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin more than with simply using the feature Sex alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions. Pclass, Sex, Age, SibSp, and Parch are some suggested features to try. Use the survival_stats function below to to examine various survival statistics. Hint: To use mulitple filter conditions, put each condition in the list passed as the last argument. Example: ["Sex == 'male'", "Age &lt; 18"] End of explanation """ def predictions_3(data): """ Model with multiple features. Makes a prediction with an accuracy of at least 80%. """ predictions = [] for _, passenger in data.iterrows(): """ Model with features of Sex, Age, Pclass, and SibSp. """ if passenger['Sex'] == "female": if passenger['Pclass'] == 3 and passenger['Age']>=40 and passenger['Age']<60: predictions.append(0) elif passenger['SibSp'] == 3 and passenger['Age'] <= 10: predictions.append(0) else: predictions.append(1) else: if passenger['Age'] < 10: predictions.append(1) elif passenger['Pclass'] == 1 and passenger['Age']>=20 and passenger['Age'] < 40: predictions.append(1) else: predictions.append(0) # Return our predictions return pd.Series(predictions) # Make the predictions predictions = predictions_3(data) """ Explanation: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction. Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model. Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_2. End of explanation """ print accuracy_score(outcomes, predictions) """ Explanation: Question 4 Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions? Hint: Run the code cell below to see the accuracy of your predictions. End of explanation """
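A quick way to hunt for promising splits before hard-coding them is to look at survival rates per feature combination directly in pandas; a sketch using the full data loaded at the top of the notebook:

# survival rate and group size for each (Sex, Pclass) combination
rates = full_data.groupby(['Sex', 'Pclass'])['Survived'].agg(['mean', 'count'])
print(rates)

# the same idea with a coarse age bucket added
full_data_with_child = full_data.assign(Child=full_data['Age'] < 10)
print(full_data_with_child.groupby(['Sex', 'Child'])['Survived'].mean())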
pagutierrez/tutorial-sklearn
notebooks-spanish/05-aprendizaje_supervisado_clasificacion.ipynb
cc0-1.0
from sklearn.datasets import make_blobs X, y = make_blobs(centers=2, random_state=0) print('X ~ n_samples x n_features:', X.shape) print('y ~ n_samples:', y.shape) print('\n5 primeros ejemplos:\n', X[:5, :]) print('\n5 primeras etiquetas:', y[:5]) """ Explanation: Aprendizaje supervisado parte 1 -- Clasificación Para visualizar como funcionan los algoritmos de aprendizaje automático, es mejor considerar datos de una o dos dimensiones, esto es datasets con solo una o dos características. Aunque, en la práctica los datasets tienen muchas más características, es difícil representar datos de alta dimensionalidad en pantallas 2D. Vamos a ilustrar ejemplos muy simples antes de comenzar con datasets del mundo real. Primero, vamos a inspeccionar un problema de clasificación binaria con dos dimensiones. Utilizaremos los datos sintéticos que nos proporciona la función make_blobs. End of explanation """ plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel('primera característica') plt.ylabel('segunda característica') plt.legend(loc='upper right'); """ Explanation: Como los datos son bidimensionales, podemos representar cada punto en un sistema de coordenadas (ejes x e y). End of explanation """ from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1234, stratify=y) """ Explanation: La clasificación es una tarea supervisada y, ya que estamos interesados en su rendimiento en datos no utilizados para entrenar, vamos a dividir los datos en dos partes: un conjunto de entrenamiento que el algoritmo de aprendizaje utiliza para ajustar los parámetros del modelo un conjunto de test para evaluar la capacidad de generalización del modelo La función train_test_split del paquete model_selection hace justo esto por nosotros - la usaremos para generar una partición con un 75%//25% en entrenamiento y test, respectivamente. <img src="figures/train_test_split_matrix.svg" width="100%"> End of explanation """ from sklearn.linear_model import LogisticRegression """ Explanation: El API de un estimador de scikit-learn <img src="figures/supervised_workflow.svg" width="100%"> Cualquier algoritmo de scikit-learn se maneja a través de una interfaz denominada ''Estimator'' (una de las ventajas de scikit-learn es que todos los modelos y algoritmos tienen una interfaz consistente). Por ejemplo, importamos la clase correspondiente al algoritmo de regresión logística: End of explanation """ classifier = LogisticRegression() X_train.shape y_train.shape """ Explanation: Ahora, instanciamos el estimador: End of explanation """ classifier.fit(X_train, y_train) """ Explanation: Para construir el modelo a partir de nuestros datos, esto es, aprender a clasificar nuevos puntos, llamamos a la función fit pasándole los datos de entrenamiento, y las etiquetas correspondientes (la salida deseada para los datos de entrenamiento): End of explanation """ prediction = classifier.predict(X_test) """ Explanation: Algunos métodos de los estimadores se devuelven a sí mismos por defecto. Esto es, después de ejecutar el código anterior, verás los parámetros por defecto de esta instancia particular de LogisticRegression. Otra forma de obtener los parámetros de inicialización de un estimador es usar classifier.get_params(), que devuelve un diccionario de parámetros. 
Podemos aplicar el modelo a datos no utilizados anteriormente para predecir la respuesta estimada mediante el método predict: End of explanation """ print(prediction) print(y_test) """ Explanation: Podemos comparar el resultado con las etiquetas reales: End of explanation """ np.mean(prediction == y_test) """ Explanation: Podemos evaluar nuestro modelo cuantitativamente utilizando la proporción de patrones correctos. A esto se le llama accuracy: End of explanation """ classifier.score(X_test, y_test) """ Explanation: Existe una función útil, score, que incluyen todos los clasificadores de scikit-learn para obtener la medida de rendimiento a partir de los datos de test: End of explanation """ classifier.score(X_train, y_train) """ Explanation: A veces es útil comparar el rendimiento en generalización (en el conjunto de test) con el rendimiento en entrenamiento: End of explanation """ from figures import plot_2d_separator plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel("primera característica") plt.ylabel("segunda característica") plot_2d_separator(classifier, X) plt.legend(loc='upper right'); """ Explanation: LogisticRegression es un modelo lineal, lo que significa que creará una frontera de decisión que es lineal en el espacio de entrada. En 2D, esto quiere decir que generará una línea recta para separar los puntos azules de los rojos: End of explanation """ print(classifier.coef_) print(classifier.intercept_) """ Explanation: Parámetros estimados: todos los parámetros estimados del modelo son atributos del objeto estimador cuyo nombre termina en guión bajo. Para la regresión logística, serían los coeficientes y la coordenada en el origen de la línea: End of explanation """ from sklearn.neighbors import KNeighborsClassifier """ Explanation: Otro clasificador: K Nearest Neighbors Otro clasificador popular y fácil de entender es el k Nearest Neighbors (kNN). Implementa una de las estrategias más simples de aprendizaje (de hecho, en realidad no aprende): dado un nuevo ejemplo desconocido, buscar en la base de datos de referencia (entrenamiento) aquellos ejemplos que tengan características más parecidas y asignarle la clase predominante. La interfaz es exactamente la misma que para LogisticRegression. End of explanation """ knn = KNeighborsClassifier(n_neighbors=20) """ Explanation: Ahora vamos a modificar un parámetro de KNeighborsClassifier para que solo se examine el vecino más cercano: End of explanation """ knn.fit(X_train, y_train) plt.scatter(X[y == 0, 0], X[y == 0, 1], c='blue', s=40, label='0') plt.scatter(X[y == 1, 0], X[y == 1, 1], c='red', s=40, label='1', marker='s') plt.xlabel("primera característica") plt.ylabel("segunda característica") plot_2d_separator(knn, X) plt.legend(loc='upper right'); knn.score(X_test, y_test) """ Explanation: Ajustamos el modelo con nuestros datos de entrenamiento. End of explanation """
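A small follow-up experiment that often helps build intuition here is sweeping the number of neighbours and comparing train/test accuracy, reusing the split created above (a sketch):

for k in [1, 3, 5, 10, 20, 50]:
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(X_train, y_train)
    print('k=%2d  train=%.3f  test=%.3f'
          % (k, clf.score(X_train, y_train), clf.score(X_test, y_test)))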
elastic/examples
Machine Learning/Query Optimization/notebooks/2 - Query tuning - best_fields.ipynb
apache-2.0
%load_ext autoreload %autoreload 2 import importlib import os import sys from elasticsearch import Elasticsearch from skopt.plots import plot_objective # project library sys.path.insert(0, os.path.abspath('..')) import qopt importlib.reload(qopt) from qopt.notebooks import evaluate_mrr100_dev, optimize_query_mrr100 from qopt.optimize import Config # use a local Elasticsearch or Cloud instance (https://cloud.elastic.co/) es = Elasticsearch('http://localhost:9200') # set the parallelization parameter `max_concurrent_searches` for the Rank Evaluation API calls max_concurrent_searches = 10 # max_concurrent_searches = 30 index = 'msmarco-document' template_id = 'best_fields' """ Explanation: Tuning a multi_match best_fields query The following assumes familiarity with the first notebook "Query tuning". The first query type we used was a multi_match cross_fields query. This searches for query terms individually across each of the three document fields. For example, given a query string "impact of the success of the manhattan project", we search for each of the query terms in each of the fields. So we could have "impact" matching the body only, while "manhattan" could match all three fields. Due to the nature of the queries which are all questions, this might not be the best query type to use. In this step, we're going to try using the multi_match query of type best_fields, which is the default query type for multi_match. This variant will look across fields but will only return the field and score with the best matches. We will also experiment in this query with modifying a few parameters of the query that are sometimes hard to guess at. Specifically, we'll explore which field boosts to use for each of our three fields and also which tie_breaker parameter to use. End of explanation """ _ = evaluate_mrr100_dev(es, max_concurrent_searches, index, template_id, params={ 'tie_breaker': 0.0, 'url|boost': 1.0, 'title|boost': 1.0, 'body|boost': 1.0, }) """ Explanation: Baseline evaluation End of explanation """ %%time _, _, final_params_best_fields, metadata_best_fields = optimize_query_mrr100(es, max_concurrent_searches, index, template_id, config_space=Config.parse({ 'method': 'bayesian', 'num_iterations': 100, 'num_initial_points': 40, 'space': { 'tie_breaker': { 'low': 0.0, 'high': 1.0 }, 'url|boost': { 'low': 0.0, 'high': 10.0 }, 'title|boost': { 'low': 0.0, 'high': 10.0 }, 'body|boost': { 'low': 0.0, 'high': 10.0 }, }, })) _ = plot_objective(metadata_best_fields, sample_source='result') %%time _ = evaluate_mrr100_dev(es, max_concurrent_searches, index, template_id, params=final_params_best_fields) """ Explanation: That's pretty impressive for the baseline query. It beats our baseline cross_fields query but not quite the optimized one. Query tuning Let's try and optimize this best_fields query now. We'll put all the parameters into a single parameter space since there's only four. We'll use Bayesian optimization again to find the optimal parameters, with a fairly large number of iterations to make sure we test out a good portion of the parameter space. End of explanation """
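For reference, the search template being tuned here boils down to a multi_match query of roughly the following shape; the values shown are illustrative, with the real ones coming from the tuned parameters above (the actual template lives in the project repository):

example_query = {
    "query": {
        "multi_match": {
            "query": "example query string",
            "type": "best_fields",
            "tie_breaker": 0.3,                                 # tuned tie_breaker
            "fields": ["url^1.0", "title^5.0", "body^2.0"]      # tuned per-field boosts
        }
    }
}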
DJCordhose/ai
notebooks/rl/berater-v8.ipynb
mit
!pip install git+https://github.com/openai/baselines >/dev/null
!pip install gym >/dev/null

"""
Explanation: <a href="https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/rl/berater-v8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Berater Environment v8
Changes from v7

return to complete observation
gives better results
not listing all paths, but local paths plus all rest rewards

next steps

configure custom network including regularization (https://blog.openai.com/quantifying-generalization-in-reinforcement-learning/)
better rewards
set discount factor (gamma) to 1
rewards late in the game are as good as early ones
no need to push the game to an end, as every move comes at a cost anyway

add reward for returning home once all other locations have been visited

better observation?

network can learn all costs and all connections as they are static
rewards are not, but are given in the observation
all information is there, but it is very convoluted, too hard for us as humans
could we make this more accessible? Would this also help?

create baselines to better understand what is a good result

low level: always go in the direction of greatest reward
Dijkstra

Installation (required for colab)
End of explanation
"""

import numpy as np
import random

import gym
from gym.utils import seeding
from gym import spaces

def state_name_to_int(state):
    state_name_map = {
        'S': 0,
        'A': 1,
        'B': 2,
        'C': 3,
        'D': 4,
        'E': 5,
        'F': 6,
        'G': 7,
        'H': 8,
        'K': 9,
        'L': 10,
        'M': 11,
        'N': 12,
        'O': 13
    }
    return state_name_map[state]

def int_to_state_name(state_as_int):
    state_map = {
        0: 'S',
        1: 'A',
        2: 'B',
        3: 'C',
        4: 'D',
        5: 'E',
        6: 'F',
        7: 'G',
        8: 'H',
        9: 'K',
        10: 'L',
        11: 'M',
        12: 'N',
        13: 'O'
    }
    return state_map[state_as_int]

class BeraterEnv(gym.Env):
    """
    The Berater Problem

    Actions:
    There are 4 discrete deterministic actions, each choosing one direction
    """
    metadata = {'render.modes': ['ansi']}

    showStep = False
    showDone = True
    envEpisodeModulo = 100

    def __init__(self):
        # self.map = {
        #     'S': [('A', 100), ('B', 400), ('C', 200 )],
        #     'A': [('B', 250), ('C', 400), ('S', 100 )],
        #     'B': [('A', 250), ('C', 250), ('S', 400 )],
        #     'C': [('A', 400), ('B', 250), ('S', 200 )]
        # }
        self.map = {
            'S': [('A', 300), ('B', 100), ('C', 200 )],
            'A': [('S', 300), ('B', 100), ('E', 100 ), ('D', 100 )],
            'B': [('S', 100), ('A', 100), ('C', 50 ), ('K', 200 )],
            'C': [('S', 200), ('B', 50), ('M', 100 ), ('L', 200 )],
            'D': [('A', 100), ('F', 50)],
            'E': [('A', 100), ('F', 100), ('H', 100)],
            'F': [('D', 50), ('E', 100), ('G', 200)],
            'G': [('F', 200), ('O', 300)],
            'H': [('E', 100), ('K', 300)],
            'K': [('B', 200), ('H', 300)],
            'L': [('C', 200), ('M', 50)],
            'M': [('C', 100), ('L', 50), ('N', 100)],
            'N': [('M', 100), ('O', 100)],
            'O': [('N', 100), ('G', 300)]
        }
        max_paths = 4
        self.action_space = spaces.Discrete(max_paths)

        positions = len(self.map)
        # observations: position, reward of all 4 local paths, rest reward of all locations
        # non existing path is -1000 and no position change
        # look at what #getObservation returns if you are confused
        low = np.append(np.append([0], np.full(max_paths, -1000)), np.full(positions, 0))
        high = np.append(np.append([positions - 1], np.full(max_paths, 1000)), np.full(positions, 1000))
        self.observation_space = spaces.Box(low=low, high=high, dtype=np.float32)

        self.reward_range = (-1, 1)

        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False

        self.envReward = 0
        self.envEpisodeCount = 0
        self.envStepCount = 0

        self.reset()
        self.optimum = self.calculate_customers_reward()

    def seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def iterate_path(self, state, action):
        paths = self.map[state]
        if action < len(paths):
            return paths[action]
        else:
            # sorry, no such action, stay where you are and pay a high penalty
            return (state, 1000)

    def step(self, action):
        destination, cost = self.iterate_path(self.state, action)
        lastState = self.state
        customerReward = self.customer_reward[destination]
        reward = (customerReward - cost) / self.optimum

        self.state = destination
        self.customer_visited(destination)
        done = destination == 'S' and self.all_customers_visited()

        stateAsInt = state_name_to_int(self.state)
        self.totalReward += reward
        self.stepCount += 1
        self.envReward += reward
        self.envStepCount += 1

        if self.showStep:
            print( "Episode: " + ("%4.0f " % self.envEpisodeCount) +
                   " Step: " + ("%4.0f " % self.stepCount) +
                   lastState + ' --' + str(action) + '-> ' + self.state +
                   ' R=' + ("% 2.2f" % reward) + ' totalR=' + ("% 3.2f" % self.totalReward) +
                   ' cost=' + ("%4.0f" % cost) + ' customerR=' + ("%4.0f" % customerReward) +
                   ' optimum=' + ("%4.0f" % self.optimum)
                   )

        if done and not self.isDone:
            self.envEpisodeCount += 1
            if BeraterEnv.showDone:
                episodes = BeraterEnv.envEpisodeModulo
                if (self.envEpisodeCount % BeraterEnv.envEpisodeModulo != 0):
                    episodes = self.envEpisodeCount % BeraterEnv.envEpisodeModulo
                print( "Done: " +
                       ("episodes=%6.0f " % self.envEpisodeCount) +
                       ("avgSteps=%6.2f " % (self.envStepCount/episodes)) +
                       ("avgTotalReward=% 3.2f" % (self.envReward/episodes) )
                       )
                if (self.envEpisodeCount%BeraterEnv.envEpisodeModulo) == 0:
                    self.envReward = 0
                    self.envStepCount = 0

        self.isDone = done
        observation = self.getObservation(stateAsInt)
        info = {"from": self.state, "to": destination}

        return observation, reward, done, info

    def getObservation(self, position):
        result = np.array([ position,
                            self.getPathObservation(position, 0),
                            self.getPathObservation(position, 1),
                            self.getPathObservation(position, 2),
                            self.getPathObservation(position, 3)
                          ], dtype=np.float32)
        all_rest_rewards = list(self.customer_reward.values())
        result = np.append(result, all_rest_rewards)
        return result

    def getPathObservation(self, position, path):
        source = int_to_state_name(position)
        paths = self.map[self.state]
        if path < len(paths):
            target, cost = paths[path]
            reward = self.customer_reward[target]
            result = reward - cost
        else:
            result = -1000
        return result

    def customer_visited(self, customer):
        self.customer_reward[customer] = 0

    def all_customers_visited(self):
        return self.calculate_customers_reward() == 0

    def calculate_customers_reward(self):
        sum = 0
        for value in self.customer_reward.values():
            sum += value
        return sum

    def modulate_reward(self):
        number_of_customers = len(self.map) - 1
        number_per_consultant = int(number_of_customers/2)
        # number_per_consultant = int(number_of_customers/1.5)
        self.customer_reward = {
            'S': 0
        }
        for customer_nr in range(1, number_of_customers + 1):
            self.customer_reward[int_to_state_name(customer_nr)] = 0
        # every consultant only visits a few random customers
        samples = random.sample(range(1, number_of_customers + 1), k=number_per_consultant)
        key_list = list(self.customer_reward.keys())
        for sample in samples:
            self.customer_reward[key_list[sample]] = 1000

    def reset(self):
        self.totalReward = 0
        self.stepCount = 0
        self.isDone = False

        self.modulate_reward()
        self.state = 'S'
        return self.getObservation(state_name_to_int(self.state))

    def render(self):
        print(self.customer_reward)

env = BeraterEnv()
print(env.reset())
print(env.customer_reward)

"""
Explanation: Environment
End of explanation
"""

BeraterEnv.showStep = True
BeraterEnv.showDone = True

env = BeraterEnv()
print(env)
observation = env.reset()
print(observation)

for t in range(1000):
    action = env.action_space.sample()
    observation, reward, done, info = env.step(action)
    if done:
        print("Episode finished after {} timesteps".format(t+1))
        break
env.close()
print(observation)

"""
Explanation: Try out Environment
End of explanation
"""

import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)

!rm -r logs
!mkdir logs
!mkdir logs/berater

# https://github.com/openai/baselines/blob/master/baselines/deepq/experiments/train_pong.py
# log_dir = logger.get_dir()
log_dir = '/content/logs/berater/'

import gym
from baselines import bench
from baselines import logger

from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common.vec_env.vec_monitor import VecMonitor
from baselines.ppo2 import ppo2

BeraterEnv.showStep = False
BeraterEnv.showDone = False

env = BeraterEnv()

wrapped_env = DummyVecEnv([lambda: BeraterEnv()])
monitored_env = VecMonitor(wrapped_env, log_dir)

# https://github.com/openai/baselines/blob/master/baselines/ppo2/ppo2.py
# https://github.com/openai/baselines/blob/master/baselines/common/models.py#L30
%time model = ppo2.learn(\
    env=monitored_env,\
    network='mlp',\
    num_hidden=5000,\
    num_layers=3,\
    ent_coef=0.01,\
    total_timesteps=500000)

# %time model = ppo2.learn(\
#     env=monitored_env,\
#     network='mlp',\
#     num_hidden=2000,\
#     num_layers=3,\
#     ent_coef=0.1,\
#     total_timesteps=500000)

# model = ppo2.learn(
#     env=monitored_env,\
#     layer_norm=True,\
#     network='mlp',\
#     num_hidden=2000,\
#     activation=tf.nn.relu,\
#     num_layers=3,\
#     ent_coef=0.03,\
#     total_timesteps=1000000)

# monitored_env = bench.Monitor(env, log_dir)

# https://en.wikipedia.org/wiki/Q-learning#Influence_of_variables
# %time model = deepq.learn(\
#         monitored_env,\
#         seed=42,\
#         network='mlp',\
#         lr=1e-3,\
#         gamma=0.99,\
#         total_timesteps=30000,\
#         buffer_size=50000,\
#         exploration_fraction=0.5,\
#         exploration_final_eps=0.02,\
#         print_freq=1000)

model.save('berater-ppo-v7.pkl')
monitored_env.close()

"""
Explanation: Train model
random has lower total reward than version with dense customers
total cost when travelling all paths (back and forth): 2500
additional penalty for illegal moves 1000
all rewards: 6000
perfect score???
estimate: half the travel cost and no illegal moves: (6000 - 1250) / 6000 = .79
but: rewards are much more sparse while routes stay the same, maybe expect less
additionally: the agent only sees very little of the whole scenario
changes with every episode
was ok when network can learn fixed scenario
End of explanation
"""

# !ls -l $log_dir

from baselines.common import plot_util as pu
results = pu.load_results(log_dir)

import matplotlib.pyplot as plt
import numpy as np

r = results[0]
plt.ylim(0, .75)
# plt.plot(np.cumsum(r.monitor.l), r.monitor.r)
plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=100))

"""
Explanation: Visualizing Results
https://github.com/openai/baselines/blob/master/docs/viz/viz.ipynb
End of explanation
"""

import numpy as np

observation = env.reset()
env.render()
state = np.zeros((1, 2*128))
dones = np.zeros((1))

BeraterEnv.showStep = True
BeraterEnv.showDone = False

for t in range(1000):
    actions, _, state, _ = model.step(observation, S=state, M=dones)
    observation, reward, done, info = env.step(actions[0])
    if done:
        print("Episode finished after {} timesteps".format(t+1))
        break
env.close()

"""
Explanation: Enjoy model
End of explanation
"""
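
"""
Explanation: One of the "next steps" above is a low-level baseline that always goes in the direction of greatest immediate reward. The sketch below is an assumption of how such a baseline could look for this environment: it relies on the observation layout of getObservation (index 0 is the position, indices 1-4 hold reward minus cost for the four local paths) and simply takes the argmax of those four entries. Being purely greedy, it can wander once all customers are visited; the step cap keeps episodes bounded.
End of explanation
"""

BeraterEnv.showStep = False
BeraterEnv.showDone = False

def greedy_episode(env, max_steps=100):
    # always pick the locally best (reward - cost) path, ignoring the rest of the map
    observation = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        path_observations = observation[1:5]
        action = int(np.argmax(path_observations))
        observation, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward

baseline_env = BeraterEnv()
greedy_scores = [greedy_episode(baseline_env) for _ in range(100)]
print("greedy baseline: avg total reward over 100 episodes = %.2f" % np.mean(greedy_scores))

"""
Explanation: Comparing this number (and a Dijkstra-style optimal router, if implemented later) with the PPO agent's average total reward gives a concrete sense of how much the learned policy actually adds.
End of explanation
"""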